author     Benjamin Tissoires <benjamin.tissoires@redhat.com>   2022-10-05 10:21:55 +0100
committer  Benjamin Tissoires <benjamin.tissoires@redhat.com>   2022-10-05 10:21:55 +0100
commit     edd1533d3ccd82dd5d600986d27d524e6be4c5fd (patch)
tree       1ac5ae82ea63114d5c13212e2819531e4507f800 /drivers
parent     7d8fe4cfc54b5fb2093e12cffa8ca74d3c88e0fa (diff)
parent     98d67f250472cdd0f8d083830be3ec9dbb0c65a8 (diff)
download   linux-edd1533d3ccd82dd5d600986d27d524e6be4c5fd.tar.bz2
Merge branch 'for-6.1/logitech' into for-linus
- Add handling of all Bluetooth HID++ devices and fixes in HID++ (Bastien Nocera)
Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/braille/braille_console.c2
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/accessibility/speakup/serialio.h3
-rw-r--r--drivers/acpi/arm64/iort.c360
-rw-r--r--drivers/acpi/pci_mcfg.c13
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/property.c463
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/utils.c38
-rw-r--r--drivers/acpi/viot.c6
-rw-r--r--drivers/android/binder_alloc.c68
-rw-r--r--drivers/android/binder_alloc.h2
-rw-r--r--drivers/android/binder_alloc_selftest.c2
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/libata-scsi.c1
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/atm/idt77252.c1
-rw-r--r--drivers/block/Kconfig9
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c49
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/block/null_blk/main.c108
-rw-r--r--drivers/block/null_blk/null_blk.h2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rbd.c6
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c2
-rw-r--r--drivers/block/rnbd/rnbd-clt.c201
-rw-r--r--drivers/block/rnbd/rnbd-clt.h18
-rw-r--r--drivers/block/rnbd/rnbd-srv.c20
-rw-r--r--drivers/block/rnbd/rnbd-srv.h4
-rw-r--r--drivers/block/sx8.c1582
-rw-r--r--drivers/block/ublk_drv.c381
-rw-r--r--drivers/block/virtio_blk.c24
-rw-r--r--drivers/block/xen-blkback/xenbus.c20
-rw-r--r--drivers/block/xen-blkfront.c4
-rw-r--r--drivers/block/zram/zcomp.c11
-rw-r--r--drivers/block/zram/zram_drv.c48
-rw-r--r--drivers/block/zram/zram_drv.h1
-rw-r--r--drivers/char/agp/intel-gtt.c17
-rw-r--r--drivers/char/hw_random/powernv-rng.c2
-rw-r--r--drivers/char/hw_random/s390-trng.c2
-rw-r--r--drivers/char/mem.c6
-rw-r--r--drivers/char/tpm/Kconfig12
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/tpm.h1
-rw-r--r--drivers/char/tpm/tpm1-cmd.c7
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_tis_core.c14
-rw-r--r--drivers/char/tpm/tpm_tis_core.h10
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c390
-rw-r--r--drivers/clk/clk-devres.c91
-rw-r--r--drivers/clk/clk-fixed-factor.c56
-rw-r--r--drivers/clk/clk.c48
-rw-r--r--drivers/clk/imx/clk-fracn-gppll.c36
-rw-r--r--drivers/clk/imx/clk-imx93.c6
-rw-r--r--drivers/clk/mediatek/clk-mt2701-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701-g3d.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701-hif.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c22
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c22
-rw-r--r--drivers/clk/mediatek/clk-mt7622-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt7622-hif.c12
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c22
-rw-r--r--drivers/clk/mediatek/clk-mt7629-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt7629-hif.c12
-rw-r--r--drivers/clk/mediatek/clk-mt8135.c22
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c22
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c18
-rw-r--r--drivers/clk/mediatek/clk-mt8186-infra_ao.c23
-rw-r--r--drivers/clk/mediatek/clk-mt8192-msdc.c21
-rw-r--r--drivers/clk/mediatek/clk-mt8192.c29
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c24
-rw-r--r--drivers/clk/mediatek/clk-mtk.c7
-rw-r--r--drivers/clk/mediatek/clk-mtk.h9
-rw-r--r--drivers/clk/mediatek/reset.c198
-rw-r--r--drivers/clk/mediatek/reset.h82
-rw-r--r--drivers/clk/meson/axg-audio.c36
-rw-r--r--drivers/clk/qcom/Kconfig22
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c4
-rw-r--r--drivers/clk/qcom/camcc-sm8250.c16
-rw-r--r--drivers/clk/qcom/camcc-sm8450.c2856
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c144
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h11
-rw-r--r--drivers/clk/qcom/clk-hfpll.c15
-rw-r--r--drivers/clk/qcom/clk-krait.c23
-rw-r--r--drivers/clk/qcom/clk-krait.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c16
-rw-r--r--drivers/clk/qcom/clk-regmap-phy-mux.c62
-rw-r--r--drivers/clk/qcom/clk-regmap-phy-mux.h33
-rw-r--r--drivers/clk/qcom/clk-rpm.c24
-rw-r--r--drivers/clk/qcom/clk-rpmh.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c64
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c104
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c35
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c47
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c6
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c8
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c49
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c142
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c2
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c49
-rw-r--r--drivers/clk/qcom/gdsc.c36
-rw-r--r--drivers/clk/qcom/gdsc.h4
-rw-r--r--drivers/clk/qcom/gpucc-sm8350.c637
-rw-r--r--drivers/clk/qcom/krait-cc.c8
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c1052
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c4
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c22
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c20
-rw-r--r--drivers/clk/renesas/clk-r8a7778.c31
-rw-r--r--drivers/clk/renesas/clk-r8a7779.c27
-rw-r--r--drivers/clk/renesas/clk-rz.c33
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c26
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c32
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c32
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c17
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c5
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c2
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c15
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c77
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c113
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c80
-rw-r--r--drivers/clk/sunxi/Kconfig4
-rw-r--r--drivers/clk/ti/clk-44xx.c210
-rw-r--r--drivers/clk/ti/clk-54xx.c160
-rw-r--r--drivers/clk/ti/clkctrl.c4
-rw-r--r--drivers/clk/x86/Makefile4
-rw-r--r--drivers/clocksource/timer-riscv.c40
-rw-r--r--drivers/comedi/drivers/comedi_isadma.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c19
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c12
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c1
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c14
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c109
-rw-r--r--drivers/cpufreq/sti-cpufreq.c27
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c31
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c4
-rw-r--r--drivers/cpufreq/tegra20-cpufreq.c12
-rw-r--r--drivers/cpufreq/ti-cpufreq.c42
-rw-r--r--drivers/cpuidle/cpuidle.c6
-rw-r--r--drivers/cxl/Kconfig9
-rw-r--r--drivers/cxl/acpi.c243
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/core.h51
-rw-r--r--drivers/cxl/core/hdm.c691
-rw-r--r--drivers/cxl/core/mbox.c95
-rw-r--r--drivers/cxl/core/memdev.c4
-rw-r--r--drivers/cxl/core/pci.c181
-rw-r--r--drivers/cxl/core/pmem.c4
-rw-r--r--drivers/cxl/core/port.c738
-rw-r--r--drivers/cxl/core/region.c1896
-rw-r--r--drivers/cxl/cxl.h312
-rw-r--r--drivers/cxl/cxlmem.h42
-rw-r--r--drivers/cxl/cxlpci.h1
-rw-r--r--drivers/cxl/mem.c49
-rw-r--r--drivers/cxl/pci.c46
-rw-r--r--drivers/cxl/pmem.c259
-rw-r--r--drivers/cxl/port.c53
-rw-r--r--drivers/dax/super.c67
-rw-r--r--drivers/devfreq/exynos-bus.c21
-rw-r--r--drivers/devfreq/tegra30-devfreq.c22
-rw-r--r--drivers/dma/Kconfig8
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/altera-msgdma.c4
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/apple-admac.c818
-rw-r--r--drivers/dma/at_xdmac.c2
-rw-r--r--drivers/dma/dma-axi-dmac.c16
-rw-r--r--drivers/dma/dma-jz4780.c2
-rw-r--r--drivers/dma/dmaengine.c7
-rw-r--r--drivers/dma/dmatest.c45
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c11
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c141
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h31
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c83
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c49
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.h4
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c18
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.h8
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c3
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/fsl-edma-common.c3
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c38
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c2
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mv_xor_v2.c2
-rw-r--r--drivers/dma/owl-dma.c2
-rw-r--r--drivers/dma/s3c24xx-dma.c2
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c44
-rw-r--r--drivers/dma/sh/rz-dmac.c17
-rw-r--r--drivers/dma/sprd-dma.c5
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/dma/stm32-mdma.c5
-rw-r--r--drivers/dma/sun4i-dma.c32
-rw-r--r--drivers/dma/tegra186-gpc-dma.c26
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c8
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c122
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c6
-rw-r--r--drivers/edac/Kconfig1
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/pnd2_edac.c62
-rw-r--r--drivers/edac/ppc4xx_edac.c1
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c107
-rw-r--r--drivers/firmware/dmi_scan.c2
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c13
-rw-r--r--drivers/firmware/mtk-adsp-ipc.c36
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c249
-rw-r--r--drivers/gpio/gpio-104-idi-48.c157
-rw-r--r--drivers/gpio/gpio-104-idio-16.c60
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c19
-rw-r--r--drivers/gpio/gpio-adnp.c19
-rw-r--r--drivers/gpio/gpio-adp5588.c26
-rw-r--r--drivers/gpio/gpio-brcmstb.c9
-rw-r--r--drivers/gpio/gpio-davinci.c83
-rw-r--r--drivers/gpio/gpio-gpio-mm.c202
-rw-r--r--drivers/gpio/gpio-i8255.c287
-rw-r--r--drivers/gpio/gpio-i8255.h46
-rw-r--r--drivers/gpio/gpio-lp3943.c16
-rw-r--r--drivers/gpio/gpio-pca9570.c2
-rw-r--r--drivers/gpio/gpio-pch.c43
-rw-r--r--drivers/gpio/gpio-rockchip.c3
-rw-r--r--drivers/gpio/gpio-twl4030.c18
-rw-r--r--drivers/gpio/gpio-ucb1400.c20
-rw-r--r--drivers/gpio/gpio-vr41xx.c541
-rw-r--r--drivers/gpio/gpio-ws16c48.c120
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c3
-rw-r--r--drivers/gpio/gpiolib-cdev.c291
-rw-r--r--drivers/gpio/gpiolib-devres.c32
-rw-r--r--drivers/gpio/gpiolib-of.c13
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v3_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c150
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/Makefile25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c342
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c376
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c812
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h106
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c14
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c2
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c2
-rw-r--r--drivers/gpu/drm/drm_gem.c21
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c1
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_prime.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c8
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c77
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c120
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c33
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h1
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c5
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c2
-rw-r--r--drivers/gpu/drm/lima/lima_devfreq.c12
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c5
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c22
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c15
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c18
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-logitech-hidpp.c164
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c38
-rw-r--r--drivers/hv/connection.c11
-rw-r--r--drivers/hv/hv_balloon.c135
-rw-r--r--drivers/hv/hyperv_vmbus.h7
-rw-r--r--drivers/hv/vmbus_drv.c27
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/nct6775-core.c3
-rw-r--r--drivers/hwmon/nct6775-platform.c2
-rw-r--r--drivers/hwmon/nct6775.h2
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c6
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h8
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-altera.c2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c2
-rw-r--r--drivers/i2c/busses/i2c-au1550.c2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c43
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-icy.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c20
-rw-r--r--drivers/i2c/busses/i2c-kempld.c1
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c2
-rw-r--r--drivers/i2c/busses/i2c-meson.c2
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c2
-rw-r--r--drivers/i2c/busses/i2c-mpc.c7
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c45
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-opal.c4
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c7
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/busses/i2c-riic.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-scmi.c9
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-wmt.c2
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/infiniband/Kconfig15
-rw-r--r--drivers/infiniband/core/cma.c230
-rw-r--r--drivers/infiniband/core/cma_priv.h1
-rw-r--r--drivers/infiniband/core/rdma_core.c2
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/rw.c45
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c8
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c25
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig12
-rw-r--r--drivers/infiniband/hw/erdma/Makefile4
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h287
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c1430
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.h167
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c493
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c205
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c329
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h508
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c608
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c566
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c1460
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h342
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c4
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c248
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h13
-rw-r--r--drivers/infiniband/hw/irdma/cm.c11
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c8
-rw-r--r--drivers/infiniband/hw/irdma/hw.c33
-rw-r--r--drivers/infiniband/hw/irdma/main.h2
-rw-r--r--drivers/infiniband/hw/irdma/utils.c1
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c16
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c165
-rw-r--r--drivers/infiniband/hw/mlx5/main.c38
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h79
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c514
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c78
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c8
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c23
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c213
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c106
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c36
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c137
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c236
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c78
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h27
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c7
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c7
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c14
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c50
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h21
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c32
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c32
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h15
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c156
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h18
-rw-r--r--drivers/input/input-core-private.h16
-rw-r--r--drivers/input/input-mt.c48
-rw-r--r--drivers/input/input.c149
-rw-r--r--drivers/input/joystick/adc-joystick.c15
-rw-r--r--drivers/input/joystick/sensehat-joystick.c4
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/adp5588-keys.c206
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c89
-rw-r--r--drivers/input/keyboard/mt6779-keypad.c18
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c98
-rw-r--r--drivers/input/keyboard/omap4-keypad.c26
-rw-r--r--drivers/input/misc/iqs7222.c178
-rw-r--r--drivers/input/mouse/cyapa_gen6.c2
-rw-r--r--drivers/input/mouse/gpio_mouse.c2
-rw-r--r--drivers/input/serio/gscps2.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h1270
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c96
-rw-r--r--drivers/input/touchscreen/exc3000.c7
-rw-r--r--drivers/input/touchscreen/goodix.c22
-rw-r--r--drivers/input/touchscreen/zinitix.c112
-rw-r--r--drivers/iommu/Kconfig10
-rw-r--r--drivers/iommu/amd/amd_iommu.h18
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h186
-rw-r--r--drivers/iommu/amd/init.c942
-rw-r--r--drivers/iommu/amd/io_pgtable.c6
-rw-r--r--drivers/iommu/amd/iommu.c585
-rw-r--r--drivers/iommu/amd/iommu_v2.c67
-rw-r--r--drivers/iommu/amd/quirks.c4
-rw-r--r--drivers/iommu/apple-dart.c4
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c144
-rw-r--r--drivers/iommu/arm/arm-smmu/Makefile1
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c142
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c34
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h28
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c73
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h1
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c18
-rw-r--r--drivers/iommu/dma-iommu.c124
-rw-r--r--drivers/iommu/exynos-iommu.c182
-rw-r--r--drivers/iommu/fsl_pamu_domain.c5
-rw-r--r--drivers/iommu/hyperv-iommu.c4
-rw-r--r--drivers/iommu/intel/cap_audit.c2
-rw-r--r--drivers/iommu/intel/debugfs.c51
-rw-r--r--drivers/iommu/intel/dmar.c41
-rw-r--r--drivers/iommu/intel/iommu.c447
-rw-r--r--drivers/iommu/intel/iommu.h839
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/iommu/intel/pasid.c107
-rw-r--r--drivers/iommu/intel/pasid.h1
-rw-r--r--drivers/iommu/intel/perf.c2
-rw-r--r--drivers/iommu/intel/svm.c11
-rw-r--r--drivers/iommu/intel/trace.c2
-rw-r--r--drivers/iommu/intel/trace.h99
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c75
-rw-r--r--drivers/iommu/iommu.c59
-rw-r--r--drivers/iommu/iova.c12
-rw-r--r--drivers/iommu/msm_iommu.c7
-rw-r--r--drivers/iommu/mtk_iommu.c71
-rw-r--r--drivers/iommu/mtk_iommu_v1.c5
-rw-r--r--drivers/iommu/sprd-iommu.c11
-rw-r--r--drivers/iommu/sun50i-iommu.c3
-rw-r--r--drivers/iommu/tegra-gart.c5
-rw-r--r--drivers/iommu/tegra-smmu.c3
-rw-r--r--drivers/iommu/virtio-iommu.c31
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c13
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c40
-rw-r--r--drivers/irqchip/irq-riscv-intc.c7
-rw-r--r--drivers/irqchip/irq-sifive-plic.c7
-rw-r--r--drivers/irqchip/irq-tegra.c10
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/blink/Kconfig14
-rw-r--r--drivers/leds/blink/Makefile1
-rw-r--r--drivers/leds/blink/leds-bcm63138.c307
-rw-r--r--drivers/leds/leds-is31fl319x.c529
-rw-r--r--drivers/leds/leds-turris-omnia.c4
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c8
-rw-r--r--drivers/leds/simple/Kconfig6
-rw-r--r--drivers/leds/simple/Makefile1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c105
-rw-r--r--drivers/leds/simple/simatic-ipc-leds.c80
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/mailbox/imx-mailbox.c40
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c11
-rw-r--r--drivers/md/bcache/Kconfig2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/dm-bufio.c45
-rw-r--r--drivers/md/dm-ebs-target.c3
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/dm-raid.c1
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-target.c172
-rw-r--r--drivers/md/dm-verity.h6
-rw-r--r--drivers/md/dm-writecache.c3
-rw-r--r--drivers/md/dm-zoned-metadata.c4
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/md-autodetect.c21
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c424
-rw-r--r--drivers/md/md.h19
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c3
-rw-r--r--drivers/md/raid10.c18
-rw-r--r--drivers/md/raid5-cache.c40
-rw-r--r--drivers/md/raid5-log.h77
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c729
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/i2c/tda1997x.c1
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c10
-rw-r--r--drivers/memory/tegra/tegra124-emc.c11
-rw-r--r--drivers/memstick/core/ms_block.c15
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/mfd/Kconfig6
-rw-r--r--drivers/mfd/asic3.c9
-rw-r--r--drivers/mfd/axp20x.c9
-rw-r--r--drivers/mfd/cros_ec_dev.c9
-rw-r--r--drivers/mfd/db8500-prcmu.c2
-rw-r--r--drivers/mfd/dln2.c17
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c194
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c27
-rw-r--r--drivers/mfd/lpc_ich.c161
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/max77714.c4
-rw-r--r--drivers/mfd/mt6358-irq.c24
-rw-r--r--drivers/mfd/mt6397-core.c91
-rw-r--r--drivers/mfd/mt6397-irq.c9
-rw-r--r--drivers/mfd/qcom-pm8008.c53
-rw-r--r--drivers/mfd/syscon.c3
-rw-r--r--drivers/mfd/t7l66xb.c6
-rw-r--r--drivers/mfd/tc6393xb.c5
-rw-r--r--drivers/mfd/twl-core.c323
-rw-r--r--drivers/mfd/ucb1400_core.c6
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/mmc/core/block.c32
-rw-r--r--drivers/mmc/core/bus.c4
-rw-r--r--drivers/mmc/core/core.c10
-rw-r--r--drivers/mmc/core/debugfs.c80
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/quirks.h4
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/core/sdio.c30
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/cavium-octeon.c1
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/cqhci-core.c9
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c4
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c4
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c6
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c94
-rw-r--r--drivers/mmc/host/mxcmmc.c4
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/renesas_sdhi.h1
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c42
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c11
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c76
-rw-r--r--drivers/mmc/host/sdhci-msm.c29
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c5
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c9
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c209
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c7
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c34
-rw-r--r--drivers/mmc/host/sdhci-st.c5
-rw-r--r--drivers/mmc/host/sdhci.c59
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c28
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c8
-rw-r--r--drivers/mtd/devices/powernv_flash.c4
-rw-r--r--drivers/mtd/devices/spear_smi.c10
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c23
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c6
-rw-r--r--drivers/mtd/hyperbus/hyperbus-core.c8
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c13
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c4
-rw-r--r--drivers/mtd/maps/physmap-core.c13
-rw-r--r--drivers/mtd/maps/physmap-versatile.c2
-rw-r--r--drivers/mtd/mtdchar.c13
-rw-r--r--drivers/mtd/mtdcore.c63
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c16
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c4
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c9
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c17
-rw-r--r--drivers/mtd/nand/raw/omap2.c6
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c306
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c5
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/ato.c86
-rw-r--r--drivers/mtd/nand/spi/core.c1
-rw-r--r--drivers/mtd/parsers/Kconfig9
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/ofpart_bcm4908.c3
-rw-r--r--drivers/mtd/parsers/redboot.c1
-rw-r--r--drivers/mtd/parsers/scpart.c249
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c8
-rw-r--r--drivers/mtd/spi-nor/core.c70
-rw-r--r--drivers/mtd/spi-nor/core.h21
-rw-r--r--drivers/mtd/spi-nor/debugfs.c2
-rw-r--r--drivers/mtd/spi-nor/esmt.c2
-rw-r--r--drivers/mtd/spi-nor/issi.c31
-rw-r--r--drivers/mtd/spi-nor/micron-st.c12
-rw-r--r--drivers/mtd/spi-nor/otp.c12
-rw-r--r--drivers/mtd/spi-nor/sfdp.c34
-rw-r--r--drivers/mtd/spi-nor/spansion.c185
-rw-r--r--drivers/mtd/spi-nor/xilinx.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c41
-rw-r--r--drivers/net/bonding/bond_alb.c10
-rw-r--r--drivers/net/bonding/bond_main.c47
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c18
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c53
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c3
-rw-r--r--drivers/net/dsa/ocelot/felix.c3
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c573
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c553
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c21
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c10
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c8
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c10
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c4
-rw-r--r--drivers/net/ethernet/freescale/fec.h10
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c42
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c35
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c59
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h18
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c8
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c42
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c62
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c55
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c468
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c95
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig6
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h2
-rw-r--r--drivers/net/geneve.c15
-rw-r--r--drivers/net/ipa/ipa_mem.c2
-rw-r--r--drivers/net/ipa/ipa_reg.h2
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/macsec.c69
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/phy-c45.c34
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/tap.c20
-rw-r--r--drivers/net/usb/ax88179_178a.c26
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c27
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/virtio_net.c292
-rw-r--r--drivers/net/vxlan/vxlan_core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h7
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h14
-rw-r--r--drivers/net/wireless/ath/trace.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h12
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h3
-rw-r--r--drivers/nfc/pn533/uart.c1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c48
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c6
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c12
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h7
-rw-r--r--drivers/ntb/test/ntb_tool.c8
-rw-r--r--drivers/nvdimm/pmem.c17
-rw-r--r--drivers/nvdimm/region_devs.c28
-rw-r--r--drivers/nvdimm/virtio_pmem.c9
-rw-r--r--drivers/nvme/Kconfig1
-rw-r--r--drivers/nvme/Makefile1
-rw-r--r--drivers/nvme/common/Kconfig4
-rw-r--r--drivers/nvme/common/Makefile7
-rw-r--r--drivers/nvme/common/auth.c483
-rw-r--r--drivers/nvme/host/Kconfig15
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/apple.c28
-rw-r--r--drivers/nvme/host/auth.c1017
-rw-r--r--drivers/nvme/host/constants.c3
-rw-r--r--drivers/nvme/host/core.c493
-rw-r--r--drivers/nvme/host/fabrics.c102
-rw-r--r--drivers/nvme/host/fabrics.h7
-rw-r--r--drivers/nvme/host/fc.c5
-rw-r--r--drivers/nvme/host/multipath.c9
-rw-r--r--drivers/nvme/host/nvme.h41
-rw-r--r--drivers/nvme/host/pci.c228
-rw-r--r--drivers/nvme/host/rdma.c106
-rw-r--r--drivers/nvme/host/tcp.c98
-rw-r--r--drivers/nvme/host/trace.c32
-rw-r--r--drivers/nvme/host/trace.h2
-rw-r--r--drivers/nvme/target/Kconfig15
-rw-r--r--drivers/nvme/target/Makefile1
-rw-r--r--drivers/nvme/target/admin-cmd.c4
-rw-r--r--drivers/nvme/target/auth.c525
-rw-r--r--drivers/nvme/target/configfs.c136
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c544
-rw-r--r--drivers/nvme/target/fabrics-cmd.c55
-rw-r--r--drivers/nvme/target/loop.c8
-rw-r--r--drivers/nvme/target/nvmet.h75
-rw-r--r--drivers/nvme/target/rdma.c2
-rw-r--r--drivers/nvme/target/tcp.c3
-rw-r--r--drivers/of/address.c17
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/of/device.c5
-rw-r--r--drivers/of/fdt.c25
-rw-r--r--drivers/of/kexec.c17
-rw-r--r--drivers/of/of_reserved_mem.c3
-rw-r--r--drivers/of/overlay.c20
-rw-r--r--drivers/of/unittest.c17
-rw-r--r--drivers/opp/core.c1571
-rw-r--r--drivers/opp/cpu.c12
-rw-r--r--drivers/opp/debugfs.c27
-rw-r--r--drivers/opp/of.c150
-rw-r--r--drivers/opp/opp.h56
-rw-r--r--drivers/opp/ti-opp-supply.c77
-rw-r--r--drivers/parisc/ccio-dma.c13
-rw-r--r--drivers/parisc/lba_pci.c6
-rw-r--r--drivers/parisc/led.c2
-rw-r--r--drivers/pci/Kconfig8
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/controller/Kconfig4
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c6
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c22
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c19
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c670
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c34
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c12
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c2
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c92
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c404
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c472
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h178
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-fu740.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c36
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c431
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194-acpi.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c684
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c6
-rw-r--r--drivers/pci/controller/pci-aardvark.c112
-rw-r--r--drivers/pci/controller/pci-loongson.c206
-rw-r--r--drivers/pci/controller/pci-mvebu.c4
-rw-r--r--drivers/pci/controller/pci-rcar-gen2.c1
-rw-r--r--drivers/pci/controller/pci-tegra.c9
-rw-r--r--drivers/pci/controller/pci-xgene.c2
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c443
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c4
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c62
-rw-r--r--drivers/pci/controller/pcie-mediatek.c8
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c4
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c8
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c60
-rw-r--r--drivers/pci/controller/vmd.c13
-rw-r--r--drivers/pci/doe.c536
-rw-r--r--drivers/pci/endpoint/functions/Kconfig12
-rw-r--r--drivers/pci/endpoint/functions/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c117
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c1442
-rw-r--r--drivers/pci/mmap.c44
-rw-r--r--drivers/pci/p2pdma.c93
-rw-r--r--drivers/pci/pci-acpi.c5
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer.c15
-rw-r--r--drivers/pci/pcie/aspm.c20
-rw-r--r--drivers/pci/pcie/err.c12
-rw-r--r--drivers/pci/pcie/portdrv_core.c9
-rw-r--r--drivers/pci/probe.c92
-rw-r--r--drivers/pci/proc.c7
-rw-r--r--drivers/pci/quirks.c24
-rw-r--r--drivers/pci/switch/switchtec.c7
-rw-r--r--drivers/perf/riscv_pmu.c1
-rw-r--r--drivers/perf/riscv_pmu_legacy.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c30
-rw-r--r--drivers/phy/samsung/phy-exynos-pcie.c25
-rw-r--r--drivers/pinctrl/Kconfig2
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c21
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx93.c1
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c16
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c28
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h25
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c14
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c417
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c296
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c10
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c242
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1376
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c7
-rw-r--r--drivers/pinctrl/pinctrl-at91.c10
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c14
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c64
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c4
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c5
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c11
-rw-r--r--drivers/pinctrl/qcom/Kconfig19
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c956
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c1544
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c3
-rw-r--r--drivers/pinctrl/renesas/Kconfig18
-rw-r--r--drivers/pinctrl/renesas/Makefile2
-rw-r--r--drivers/pinctrl/renesas/core.c6
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c4262
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c1119
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h9
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h8
-rw-r--r--drivers/pinctrl/sunxi/Kconfig8
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c840
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c22
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c25
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c156
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h109
-rw-r--r--drivers/platform/Kconfig5
-rw-r--r--drivers/platform/chrome/Kconfig11
-rw-r--r--drivers/platform/chrome/Makefile5
-rw-r--r--drivers/platform/chrome/cros_ec.c11
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c473
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c2753
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h8
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c93
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c196
-rw-r--r--drivers/platform/chrome/cros_kunit_util.c130
-rw-r--r--drivers/platform/chrome/cros_kunit_util.h48
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c23
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c82
-rw-r--r--drivers/platform/olpc/olpc-ec.c2
-rw-r--r--drivers/platform/surface/Kconfig58
-rw-r--r--drivers/platform/surface/Makefile2
-rw-r--r--drivers/platform/surface/aggregator/Kconfig2
-rw-r--r--drivers/platform/surface/aggregator/Makefile2
-rw-r--r--drivers/platform/surface/aggregator/bus.c151
-rw-r--r--drivers/platform/surface/aggregator/bus.h2
-rw-r--r--drivers/platform/surface/aggregator/controller.c55
-rw-r--r--drivers/platform/surface/aggregator/controller.h2
-rw-r--r--drivers/platform/surface/aggregator/core.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/trace.h82
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c29
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_hub.c371
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c362
-rw-r--r--drivers/platform/surface/surface_aggregator_tabletsw.c533
-rw-r--r--drivers/platform/surface/surface_dtx.c2
-rw-r--r--drivers/platform/surface/surface_gpe.c14
-rw-r--r--drivers/platform/surface/surface_hotplug.c2
-rw-r--r--drivers/platform/surface/surface_platform_profile.c2
-rw-r--r--drivers/platform/x86/Kconfig52
-rw-r--r--drivers/platform/x86/Makefile9
-rw-r--r--drivers/platform/x86/acer-wmi.c7
-rw-r--r--drivers/platform/x86/amd/Kconfig31
-rw-r--r--drivers/platform/x86/amd/Makefile10
-rw-r--r--drivers/platform/x86/amd/hsmp.c (renamed from drivers/platform/x86/amd_hsmp.c)0
-rw-r--r--drivers/platform/x86/amd/pmc.c (renamed from drivers/platform/x86/amd-pmc.c)0
-rw-r--r--drivers/platform/x86/apple-gmux.c5
-rw-r--r--drivers/platform/x86/asus-wmi.c25
-rw-r--r--drivers/platform/x86/compal-laptop.c4
-rw-r--r--drivers/platform/x86/dell/Kconfig1
-rw-r--r--drivers/platform/x86/intel/pmt/class.c23
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c18
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c39
-rw-r--r--drivers/platform/x86/intel/vsec.c130
-rw-r--r--drivers/platform/x86/intel/vsec.h11
-rw-r--r--drivers/platform/x86/mlx-platform.c491
-rw-r--r--drivers/platform/x86/p2sb.c133
-rw-r--r--drivers/platform/x86/panasonic-laptop.c28
-rw-r--r--drivers/platform/x86/pmc_atom.c19
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c70
-rw-r--r--drivers/platform/x86/simatic-ipc.c43
-rw-r--r--drivers/platform/x86/sony-laptop.c7
-rw-r--r--drivers/platform/x86/system76_acpi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c168
-rw-r--r--drivers/pnp/resource.c5
-rw-r--r--drivers/power/reset/Kconfig6
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c184
-rw-r--r--drivers/power/reset/pwr-mlxbf.c97
-rw-r--r--drivers/power/supply/ab8500-chargalg.h4
-rw-r--r--drivers/power/supply/ab8500_btemp.c1
-rw-r--r--drivers/power/supply/ab8500_chargalg.c70
-rw-r--r--drivers/power/supply/ab8500_charger.c48
-rw-r--r--drivers/power/supply/ab8500_fg.c3
-rw-r--r--drivers/power/supply/bq24257_charger.c2
-rw-r--r--drivers/power/supply/cros_peripheral_charger.c2
-rw-r--r--drivers/power/supply/goldfish_battery.c4
-rw-r--r--drivers/power/supply/lp8788-charger.c2
-rw-r--r--drivers/power/supply/max77976_charger.c4
-rw-r--r--drivers/power/supply/olpc_battery.c5
-rw-r--r--drivers/power/supply/pm2301_charger.h492
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/surface_charger.c4
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/cros-ec-regulator.c36
-rw-r--r--drivers/remoteproc/imx_rproc.c7
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c3
-rw-r--r--drivers/remoteproc/mtk_scp.c23
-rw-r--r--drivers/remoteproc/omap_remoteproc.c6
-rw-r--r--drivers/remoteproc/pru_rproc.c1
-rw-r--r--drivers/remoteproc/qcom_common.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c54
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c105
-rw-r--r--drivers/remoteproc/qcom_sysmon.c16
-rw-r--r--drivers/remoteproc/qcom_wcnss.c10
-rw-r--r--drivers/remoteproc/remoteproc_core.c28
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c12
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/reset/Kconfig13
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-tps380x.c126
-rw-r--r--drivers/rpmsg/mtk_rpmsg.c2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c10
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c9
-rw-r--r--drivers/rpmsg/rpmsg_char.c7
-rw-r--r--drivers/rpmsg/rpmsg_core.c3
-rw-r--r--drivers/rpmsg/rpmsg_internal.h4
-rw-r--r--drivers/rtc/Kconfig41
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/class.c6
-rw-r--r--drivers/rtc/dev.c8
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c5
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c5
-rw-r--r--drivers/rtc/rtc-bq32k.c5
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-core.h5
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-ds1374.c5
-rw-r--r--drivers/rtc/rtc-ds1672.c5
-rw-r--r--drivers/rtc/rtc-ds3232.c5
-rw-r--r--drivers/rtc/rtc-em3027.c5
-rw-r--r--drivers/rtc/rtc-fm3130.c5
-rw-r--r--drivers/rtc/rtc-hym8563.c5
-rw-r--r--drivers/rtc/rtc-isl12022.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c10
-rw-r--r--drivers/rtc/rtc-max6900.c5
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c8
-rw-r--r--drivers/rtc/rtc-mpfs.c323
-rw-r--r--drivers/rtc/rtc-nct3018y.c553
-rw-r--r--drivers/rtc/rtc-pcf8523.c5
-rw-r--r--drivers/rtc/rtc-pcf85363.c5
-rw-r--r--drivers/rtc/rtc-pcf8563.c5
-rw-r--r--drivers/rtc/rtc-pcf8583.c5
-rw-r--r--drivers/rtc/rtc-rv3029c2.c5
-rw-r--r--drivers/rtc/rtc-rv8803.c98
-rw-r--r--drivers/rtc/rtc-rx6110.c5
-rw-r--r--drivers/rtc/rtc-rx8025.c22
-rw-r--r--drivers/rtc/rtc-rx8581.c5
-rw-r--r--drivers/rtc/rtc-s35390a.c5
-rw-r--r--drivers/rtc/rtc-sd3078.c5
-rw-r--r--drivers/rtc/rtc-spear.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-ti-k3.c680
-rw-r--r--drivers/rtc/rtc-vr41xx.c363
-rw-r--r--drivers/rtc/rtc-x1205.c5
-rw-r--r--drivers/rtc/rtc-zynqmp.c115
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_diag.c1
-rw-r--r--drivers/s390/block/dasd_eckd.c1
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/uvdevice.c5
-rw-r--r--drivers/s390/char/zcore.c55
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c205
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h12
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c58
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c99
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c114
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h13
-rw-r--r--drivers/s390/crypto/ap_bus.c34
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/ap_queue.c2
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c124
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c1544
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h54
-rw-r--r--drivers/s390/net/qeth_core_main.c168
-rw-r--r--drivers/s390/net/qeth_ethtool.c12
-rw-r--r--drivers/s390/scsi/zfcp_diag.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c29
-rw-r--r--drivers/s390/scsi/zfcp_fc.h6
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c3
-rw-r--r--drivers/scsi/BusLogic.c35
-rw-r--r--drivers/scsi/FlashPoint.c4
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c63
-rw-r--r--drivers/scsi/a3000.c53
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c21
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c4
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h441
-rw-r--r--drivers/scsi/dpt/dpti_ioctl.h136
-rw-r--r--drivers/scsi/dpt/dptsig.h336
-rw-r--r--drivers/scsi/dpt/osd_defs.h79
-rw-r--r--drivers/scsi/dpt/osd_util.h358
-rw-r--r--drivers/scsi/dpt/sys_info.h417
-rw-r--r--drivers/scsi/dpt_i2o.c3545
-rw-r--r--drivers/scsi/dpti.h331
-rw-r--r--drivers/scsi/fcoe/fcoe.c1
-rw-r--r--drivers/scsi/fnic/fnic_main.c45
-rw-r--r--drivers/scsi/gvp11.c95
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c49
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c16
-rw-r--r--drivers/scsi/hosts.c27
-rw-r--r--drivers/scsi/iscsi_tcp.c74
-rw-r--r--drivers/scsi/iscsi_tcp.h2
-rw-r--r--drivers/scsi/libiscsi.c313
-rw-r--r--drivers/scsi/libiscsi_tcp.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c67
-rw-r--r--drivers/scsi/libsas/sas_init.c4
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/lpfc/lpfc.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c324
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c38
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c4
-rw-r--r--drivers/scsi/mesh.c7
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h73
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c67
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c291
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c7
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c73
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c10
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c46
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h36
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c585
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_edif_bsg.h106
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c131
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c138
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c67
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c103
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c45
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_lib.c65
-rw-r--r--drivers/scsi/scsi_scan.c9
-rw-r--r--drivers/scsi/scsi_sysfs.c29
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c98
-rw-r--r--drivers/scsi/scsi_transport_sas.c6
-rw-r--r--drivers/scsi/sd.c7
-rw-r--r--drivers/scsi/sg.c53
-rw-r--r--drivers/scsi/smartpqi/Kconfig2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h27
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c405
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c11
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h4
-rw-r--r--drivers/scsi/snic/snic_fwint.h2
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c3
-rw-r--r--drivers/soc/tegra/common.c49
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soundwire/intel.c16
-rw-r--r--drivers/spi/spi-meson-spicc.c129
-rw-r--r--drivers/spi/spi-mpc52xx.c2
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/target/iscsi/iscsi_target.c57
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c122
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c113
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c160
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_configfs.c27
-rw-r--r--drivers/target/target_core_device.c38
-rw-r--r--drivers/target/target_core_file.c37
-rw-r--r--drivers/target/target_core_iblock.c13
-rw-r--r--drivers/target/target_core_pr.c28
-rw-r--r--drivers/target/target_core_sbc.c99
-rw-r--r--drivers/target/target_core_stat.c10
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/tee/tee_shm.c3
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c9
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c2
-rw-r--r--drivers/thermal/thermal_core.c1
-rw-r--r--drivers/thermal/thermal_sysfs.c10
-rw-r--r--drivers/tty/amiserial.c20
-rw-r--r--drivers/tty/mips_ejtag_fdc.c4
-rw-r--r--drivers/tty/n_gsm.c757
-rw-r--r--drivers/tty/n_tty.c92
-rw-r--r--drivers/tty/serial/8250/8250.h24
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c7
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c24
-rw-r--r--drivers/tty/serial/8250/8250_core.c8
-rw-r--r--drivers/tty/serial/8250/8250_dw.c68
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c152
-rw-r--r--drivers/tty/serial/8250/8250_early.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c25
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c31
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c4
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c28
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c135
-rw-r--r--drivers/tty/serial/8250/8250_pericom.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c157
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig18
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl011.c15
-rw-r--r--drivers/tty/serial/ar933x_uart.c27
-rw-r--r--drivers/tty/serial/atmel_serial.c103
-rw-r--r--drivers/tty/serial/earlycon.c3
-rw-r--r--drivers/tty/serial/fsl_lpuart.c67
-rw-r--r--drivers/tty/serial/imx.c21
-rw-r--r--drivers/tty/serial/kgdboc.c2
-rw-r--r--drivers/tty/serial/max310x.c272
-rw-r--r--drivers/tty/serial/mcf.c10
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c4
-rw-r--r--drivers/tty/serial/msm_serial.c550
-rw-r--r--drivers/tty/serial/mux.c6
-rw-r--r--drivers/tty/serial/mvebu-uart.c11
-rw-r--r--drivers/tty/serial/omap-serial.c18
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c7
-rw-r--r--drivers/tty/serial/pic32_uart.c4
-rw-r--r--drivers/tty/serial/pmac_zilog.c1
-rw-r--r--drivers/tty/serial/pxa.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c91
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c90
-rw-r--r--drivers/tty/serial/sc16is7xx.c10
-rw-r--r--drivers/tty/serial/serial-tegra.c5
-rw-r--r--drivers/tty/serial/serial_core.c452
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c48
-rw-r--r--drivers/tty/serial/sifive.c10
-rw-r--r--drivers/tty/serial/st-asc.c1
-rw-r--r--drivers/tty/serial/stm32-usart.c79
-rw-r--r--drivers/tty/serial/stm32-usart.h68
-rw-r--r--drivers/tty/serial/sunsu.c4
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/vr41xx_siu.c934
-rw-r--r--drivers/tty/tty_buffer.c59
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/tty_ioctl.c4
-rw-r--r--drivers/tty/tty_port.c21
-rw-r--r--drivers/tty/vt/Makefile2
-rw-r--r--drivers/tty/vt/consolemap.c684
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped6
-rw-r--r--drivers/tty/vt/selection.c3
-rw-r--r--drivers/tty/vt/vt.c16
-rw-r--r--drivers/ufs/core/ufshcd-priv.h6
-rw-r--r--drivers/ufs/core/ufshcd.c102
-rw-r--r--drivers/ufs/host/Kconfig12
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ufs-exynos.c182
-rw-r--r--drivers/ufs/host/ufs-exynos.h1
-rw-r--r--drivers/ufs/host/ufs-mediatek.c324
-rw-r--r--drivers/ufs/host/ufs-mediatek.h74
-rw-r--r--drivers/ufs/host/ufs-qcom.c23
-rw-r--r--drivers/ufs/host/ufs-renesas.c412
-rw-r--r--drivers/ufs/host/ufshcd-pci.c18
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c15
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h6
-rw-r--r--drivers/usb/chipidea/trace.h4
-rw-r--r--drivers/usb/core/hcd.c17
-rw-r--r--drivers/usb/host/ohci-sa1111.c25
-rw-r--r--drivers/usb/host/xhci-trace.h4
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c2
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h6
-rw-r--r--drivers/usb/musb/musb_trace.h4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c14
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c144
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h11
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c173
-rw-r--r--drivers/vdpa/vdpa.c14
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c18
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h1
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c176
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c3
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c102
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c180
-rw-r--r--drivers/vfio/Makefile2
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_private.h2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c11
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c14
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h4
-rw-r--r--drivers/vfio/pci/mlx5/main.c11
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c7
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h21
-rw-r--r--drivers/vfio/vfio.h17
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c14
-rw-r--r--drivers/vfio/vfio_iommu_type1.c197
-rw-r--r--drivers/vfio/vfio_main.c (renamed from drivers/vfio/vfio.c)192
-rw-r--r--drivers/vhost/scsi.c89
-rw-r--r--drivers/vhost/vdpa.c38
-rw-r--r--drivers/vhost/vringh.c78
-rw-r--r--drivers/video/backlight/lp855x_bl.c21
-rw-r--r--drivers/video/backlight/platform_lcd.c10
-rw-r--r--drivers/video/backlight/rt4831-backlight.c33
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/console/vgacon.c12
-rw-r--r--drivers/video/fbdev/68328fb.c7
-rw-r--r--drivers/video/fbdev/amba-clcd.c24
-rw-r--r--drivers/video/fbdev/amifb.c15
-rw-r--r--drivers/video/fbdev/arkfb.c9
-rw-r--r--drivers/video/fbdev/atafb.c103
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c48
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/chipsfb.c1
-rw-r--r--drivers/video/fbdev/cirrusfb.c4
-rw-r--r--drivers/video/fbdev/clps711x-fb.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c37
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/cyber2000fb.c8
-rw-r--r--drivers/video/fbdev/dnfb.c2
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/fm2fb.c4
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c6
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hpfb.c4
-rw-r--r--drivers/video/fbdev/i740fb.c11
-rw-r--r--drivers/video/fbdev/imxfb.c136
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c6
-rw-r--r--drivers/video/fbdev/offb.c1
-rw-r--r--drivers/video/fbdev/omap/hwa742.c3
-rw-r--r--drivers/video/fbdev/omap/omapfb.h9
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c9
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c5
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c41
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/sis/init.c4
-rw-r--r--drivers/video/fbdev/sis/sis_main.c278
-rw-r--r--drivers/video/fbdev/skeletonfb.c6
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c2
-rw-r--r--drivers/video/fbdev/sstfb.c2
-rw-r--r--drivers/video/fbdev/sunxvr1000.c2
-rw-r--r--drivers/video/fbdev/sunxvr2500.c2
-rw-r--r--drivers/video/fbdev/sunxvr500.c2
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/tgafb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c2
-rw-r--r--drivers/video/fbdev/valkyriefb.c10
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/virtio/Kconfig11
-rw-r--r--drivers/virtio/virtio.c4
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_mem.c6
-rw-r--r--drivers/virtio/virtio_mmio.c5
-rw-r--r--drivers/virtio/virtio_pci_common.c12
-rw-r--r--drivers/virtio/virtio_pci_legacy.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c136
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c39
-rw-r--r--drivers/virtio/virtio_ring.c778
-rw-r--r--drivers/virtio/virtio_vdpa.c2
-rw-r--r--drivers/watchdog/Kconfig9
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c2
-rw-r--r--drivers/watchdog/bcm7038_wdt.c8
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/dw_wdt.c8
-rw-r--r--drivers/watchdog/f71808e_wdt.c4
-rw-r--r--drivers/watchdog/max77620_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c10
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c41
-rw-r--r--drivers/watchdog/pseries-wdt.c239
-rw-r--r--drivers/watchdog/realtek_otto_wdt.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/sama5d4_wdt.c8
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c15
-rw-r--r--drivers/watchdog/sp5100_tco.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/st_lpc_wdt.c9
-rw-r--r--drivers/watchdog/tegra_wdt.c14
-rw-r--r--drivers/watchdog/wdat_wdt.c7
-rw-r--r--drivers/xen/events/events_base.c53
-rw-r--r--drivers/xen/privcmd.c21
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
1624 files changed, 77536 insertions, 32360 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index fdc6b593f500..c4d54a5326b1 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -131,7 +131,7 @@ static void vc_refresh(struct vc_data *vc)
for (i = 0; i < WIDTH; i++) {
u16 glyph = screen_glyph(vc,
2 * (vc_x + i) + vc_y * vc->vc_size_row);
- buf[i] = inverse_translate(vc, glyph, 1);
+ buf[i] = inverse_translate(vc, glyph, true);
}
braille_write(buf);
}
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index d726537fa16c..f52265293482 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -470,7 +470,7 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
c |= 0x100;
}
- ch = inverse_translate(vc, c, 1);
+ ch = inverse_translate(vc, c, true);
*attribs = (w & 0xff00) >> 8;
}
return ch;
diff --git a/drivers/accessibility/speakup/serialio.h b/drivers/accessibility/speakup/serialio.h
index 6f8f86f161bb..b4f9a1925b81 100644
--- a/drivers/accessibility/speakup/serialio.h
+++ b/drivers/accessibility/speakup/serialio.h
@@ -33,9 +33,8 @@ struct old_serial_port {
#define NUM_DISABLE_TIMEOUTS 3
/* buffer timeout in ms */
#define SPK_TIMEOUT 100
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
#define spk_serial_tx_busy() \
- ((inb(speakup_info.port_tts + UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ (!uart_lsr_tx_empty(inb(speakup_info.port_tts + UART_LSR)))
#endif
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index f2f8f05662de..ca2aed86b540 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -788,6 +788,294 @@ void acpi_configure_pmsi_domain(struct device *dev)
}
#ifdef CONFIG_IOMMU_API
+static void iort_rmr_free(struct device *dev,
+ struct iommu_resv_region *region)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+
+ rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
+ kfree(rmr_data->sids);
+ kfree(rmr_data);
+}
+
+static struct iommu_iort_rmr_data *iort_rmr_alloc(
+ struct acpi_iort_rmr_desc *rmr_desc,
+ int prot, enum iommu_resv_type type,
+ u32 *sids, u32 num_sids)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+ struct iommu_resv_region *region;
+ u32 *sids_copy;
+ u64 addr = rmr_desc->base_address, size = rmr_desc->length;
+
+ rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
+ if (!rmr_data)
+ return NULL;
+
+ /* Create a copy of SIDs array to associate with this rmr_data */
+ sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
+ if (!sids_copy) {
+ kfree(rmr_data);
+ return NULL;
+ }
+ rmr_data->sids = sids_copy;
+ rmr_data->num_sids = num_sids;
+
+ if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
+ /* PAGE align base addr and size */
+ addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
+
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
+ rmr_desc->base_address,
+ rmr_desc->base_address + rmr_desc->length - 1,
+ addr, addr + size - 1);
+ }
+
+ region = &rmr_data->rr;
+ INIT_LIST_HEAD(&region->list);
+ region->start = addr;
+ region->length = size;
+ region->prot = prot;
+ region->type = type;
+ region->free = iort_rmr_free;
+
+ return rmr_data;
+}
+
+static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
+ u32 count)
+{
+ int i, j;
+
+ for (i = 0; i < count; i++) {
+ u64 end, start = desc[i].base_address, length = desc[i].length;
+
+ if (!length) {
+ pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
+ start);
+ continue;
+ }
+
+ end = start + length - 1;
+
+ /* Check for address overlap */
+ for (j = i + 1; j < count; j++) {
+ u64 e_start = desc[j].base_address;
+ u64 e_end = e_start + desc[j].length - 1;
+
+ if (start <= e_end && end >= e_start)
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
+ start, end);
+ }
+ }
+}
+
+/*
+ * Please note, we will keep the already allocated RMR reserve
+ * regions in case of a memory allocation failure.
+ */
+static void iort_get_rmrs(struct acpi_iort_node *node,
+ struct acpi_iort_node *smmu,
+ u32 *sids, u32 num_sids,
+ struct list_head *head)
+{
+ struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
+ struct acpi_iort_rmr_desc *rmr_desc;
+ int i;
+
+ rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
+ rmr->rmr_offset);
+
+ iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
+
+ for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
+ struct iommu_iort_rmr_data *rmr_data;
+ enum iommu_resv_type type;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
+ type = IOMMU_RESV_DIRECT_RELAXABLE;
+ else
+ type = IOMMU_RESV_DIRECT;
+
+ if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
+ prot |= IOMMU_PRIV;
+
+ /* Attributes 0x00 - 0x03 represents device memory */
+ if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
+ ACPI_IORT_RMR_ATTR_DEVICE_GRE)
+ prot |= IOMMU_MMIO;
+ else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
+ ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
+ prot |= IOMMU_CACHE;
+
+ rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
+ sids, num_sids);
+ if (!rmr_data)
+ return;
+
+ list_add_tail(&rmr_data->rr.list, head);
+ }
+}
+
+static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
+ u32 new_count)
+{
+ u32 *new_sids;
+ u32 total_count = count + new_count;
+ int i;
+
+ new_sids = krealloc_array(sids, count + new_count,
+ sizeof(*new_sids), GFP_KERNEL);
+ if (!new_sids)
+ return NULL;
+
+ for (i = count; i < total_count; i++)
+ new_sids[i] = id_start++;
+
+ return new_sids;
+}
+
+static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
+ u32 id_count)
+{
+ int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ /*
+ * Make sure the kernel has preserved the boot firmware PCIe
+ * configuration. This is required to ensure that the RMR PCIe
+ * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
+ */
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+
+ if (!host->preserve_config)
+ return false;
+ }
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ if (fwspec->ids[i] >= id_start &&
+ fwspec->ids[i] <= id_start + id_count)
+ return true;
+ }
+
+ return false;
+}
+
+static void iort_node_get_rmr_info(struct acpi_iort_node *node,
+ struct acpi_iort_node *iommu,
+ struct device *dev, struct list_head *head)
+{
+ struct acpi_iort_node *smmu = NULL;
+ struct acpi_iort_rmr *rmr;
+ struct acpi_iort_id_mapping *map;
+ u32 *sids = NULL;
+ u32 num_sids = 0;
+ int i;
+
+ if (!node->mapping_offset || !node->mapping_count) {
+ pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
+ node);
+ return;
+ }
+
+ rmr = (struct acpi_iort_rmr *)node->node_data;
+ if (!rmr->rmr_offset || !rmr->rmr_count)
+ return;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset);
+
+ /*
+ * Go through the ID mappings and see if we have a match for SMMU
+	 * and dev (if !NULL). If found, get the sids for the Node.
+ * Please note, id_count is equal to the number of IDs in the
+ * range minus one.
+ */
+ for (i = 0; i < node->mapping_count; i++, map++) {
+ struct acpi_iort_node *parent;
+
+ if (!map->id_count)
+ continue;
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+ if (parent != iommu)
+ continue;
+
+ /* If dev is valid, check RMR node corresponds to the dev SID */
+ if (dev && !iort_rmr_has_dev(dev, map->output_base,
+ map->id_count))
+ continue;
+
+ /* Retrieve SIDs associated with the Node. */
+ sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
+ map->id_count + 1);
+ if (!sids)
+ return;
+
+ num_sids += map->id_count + 1;
+ }
+
+ if (!sids)
+ return;
+
+ iort_get_rmrs(node, smmu, sids, num_sids, head);
+ kfree(sids);
+}
+
+static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_table_iort *iort;
+ struct acpi_iort_node *iort_node, *iort_end;
+ int i;
+
+ /* Only supports ARM DEN 0049E.d onwards */
+ if (iort_table->revision < 5)
+ return;
+
+ iort = (struct acpi_table_iort *)iort_table;
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort->node_offset);
+ iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort_table->length);
+
+ for (i = 0; i < iort->node_count; i++) {
+ if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
+ "IORT node pointer overflows, bad table!\n"))
+ return;
+
+ if (iort_node->type == ACPI_IORT_NODE_RMR)
+ iort_node_get_rmr_info(iort_node, iommu, dev, head);
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
+ iort_node->length);
+ }
+}
+
+/*
+ * Populate the RMR list associated with a given IOMMU and dev (if provided).
+ * If dev is NULL, the function populates all the RMRs associated with the
+ * given IOMMU.
+ */
+static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
+ struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_iort_node *iommu;
+
+ iommu = iort_get_iort_node(iommu_fwnode);
+ if (!iommu)
+ return;
+
+ iort_find_rmrs(iommu, dev, head);
+}
+
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
struct acpi_iort_node *iommu;
@@ -806,27 +1094,22 @@ static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
return NULL;
}
-/**
- * iort_iommu_msi_get_resv_regions - Reserved region driver helper
- * @dev: Device from iommu_get_resv_regions()
- * @head: Reserved region list from iommu_get_resv_regions()
- *
- * Returns: Number of msi reserved regions on success (0 if platform
- * doesn't require the reservation or no associated msi regions),
- * appropriate error value otherwise. The ITS interrupt translation
- * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
- * are the msi reserved regions.
+/*
+ * Retrieve platform specific HW MSI reserve regions.
+ * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
+ * associated with the device are the HW MSI reserved regions.
*/
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+static void iort_iommu_msi_get_resv_regions(struct device *dev,
+ struct list_head *head)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct acpi_iort_its_group *its;
struct acpi_iort_node *iommu_node, *its_node = NULL;
- int i, resv = 0;
+ int i;
iommu_node = iort_get_msi_resv_iommu(dev);
if (!iommu_node)
- return 0;
+ return;
/*
* Current logic to reserve ITS regions relies on HW topologies
@@ -846,7 +1129,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
}
if (!its_node)
- return 0;
+ return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)its_node->node_data;
@@ -860,15 +1143,52 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
prot, IOMMU_RESV_MSI);
- if (region) {
+ if (region)
list_add_tail(&region->list, head);
- resv++;
- }
}
}
+}
+
+/**
+ * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
+ * @dev: Device from iommu_get_resv_regions()
+ * @head: Reserved region list from iommu_get_resv_regions()
+ */
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ iort_iommu_msi_get_resv_regions(dev, head);
+ iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
+}
+
+/**
+ * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
+ * associated StreamIDs information.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
+}
+EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
+
+/**
+ * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
- return (resv == its->its_count) ? resv : -ENODEV;
+ list_for_each_entry_safe(entry, next, head, list)
+ entry->free(NULL, entry);
}
+EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
static inline bool iort_iommu_driver_enabled(u8 type)
{
@@ -1034,8 +1354,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
}
#else
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
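
The alignment fixup in iort_rmr_alloc() above keeps a misaligned RMR descriptor usable by rounding the base down and widening the length up to page boundaries, reporting the firmware bug instead of failing. A minimal user-space sketch of that arithmetic, assuming a 4K page size and helper macros that merely mirror the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define offset_in_page(x) ((x) & (PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical descriptor that is not 64K (or even 4K) aligned. */
	uint64_t base = 0x10100, length = 0x8000;

	/* Round the base down to a page boundary... */
	uint64_t addr = base & PAGE_MASK;
	/* ...and widen the length so the original range stays covered. */
	uint64_t size = PAGE_ALIGN(length + offset_in_page(base));

	printf("RMR [0x%llx - 0x%llx] -> [0x%llx - 0x%llx]\n",
	       (unsigned long long)base, (unsigned long long)(base + length - 1),
	       (unsigned long long)addr, (unsigned long long)(addr + size - 1));
	return 0;
}

With the values above, [0x10100 - 0x180ff] becomes [0x10000 - 0x18fff]: strictly larger, page aligned, and fully covering the firmware-described range.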
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 53cab975f612..860014b89b8e 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -41,6 +41,8 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#ifdef CONFIG_ARM64
+
#define AL_ECAM(table_id, rev, seg, ops) \
{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
@@ -169,6 +171,17 @@ static struct mcfg_fixup mcfg_quirks[] = {
ALTRA_ECAM_QUIRK(1, 13),
ALTRA_ECAM_QUIRK(1, 14),
ALTRA_ECAM_QUIRK(1, 15),
+#endif /* ARM64 */
+
+#ifdef CONFIG_LOONGARCH
+#define LOONGSON_ECAM_MCFG(table_id, seg) \
+ { "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops }
+
+ LOONGSON_ECAM_MCFG("\0", 0),
+ LOONGSON_ECAM_MCFG("LOONGSON", 0),
+ LOONGSON_ECAM_MCFG("\0", 1),
+ LOONGSON_ECAM_MCFG("LOONGSON", 1),
+#endif /* LOONGARCH */
};
static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index db6ac540e924..e534fd49a67e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -151,7 +151,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->thermal_req);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e764f9ac9cf8..d4c168ce428c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -55,14 +55,19 @@ static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
+static const guid_t buffer_prop_guid =
+ GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
+ 0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
+
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent);
-static bool acpi_extract_properties(const union acpi_object *desc,
+static bool acpi_extract_properties(acpi_handle handle,
+ union acpi_object *desc,
struct acpi_device_data *data);
-static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
+static bool acpi_nondev_subnode_extract(union acpi_object *desc,
acpi_handle handle,
const union acpi_object *link,
struct list_head *list,
@@ -81,7 +86,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(desc, &dn->data);
+ result = acpi_extract_properties(handle, desc, &dn->data);
if (handle) {
acpi_handle scope;
@@ -155,16 +160,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
return acpi_nondev_subnode_data_ok(handle, link, list, parent);
}
-static int acpi_add_nondev_subnodes(acpi_handle scope,
- const union acpi_object *links,
- struct list_head *list,
- struct fwnode_handle *parent)
+static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ union acpi_object *links,
+ struct list_head *list,
+ struct fwnode_handle *parent)
{
bool ret = false;
int i;
for (i = 0; i < links->package.count; i++) {
- const union acpi_object *link, *desc;
+ union acpi_object *link, *desc;
acpi_handle handle;
bool result;
@@ -204,7 +209,7 @@ static int acpi_add_nondev_subnodes(acpi_handle scope,
}
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent)
{
@@ -212,7 +217,8 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
/* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *links;
+ const union acpi_object *guid;
+ union acpi_object *links;
guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
@@ -325,7 +331,7 @@ static bool acpi_is_property_guid(const guid_t *guid)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties)
+ union acpi_object *properties)
{
struct acpi_device_properties *props;
@@ -340,7 +346,141 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
return props;
}
-static bool acpi_extract_properties(const union acpi_object *desc,
+static void acpi_nondev_subnode_tag(acpi_handle handle, void *context)
+{
+}
+
+static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
+
+ acpi_untie_nondev_subnodes(&dn->data);
+ }
+}
+
+static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_status status;
+ bool ret;
+
+ status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
+ if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ acpi_handle_err(dn->handle, "Can't tag data node\n");
+ return false;
+ }
+
+ ret = acpi_tie_nondev_subnodes(&dn->data);
+ if (!ret)
+ return ret;
+ }
+
+ return true;
+}
+
+static void acpi_data_add_buffer_props(acpi_handle handle,
+ struct acpi_device_data *data,
+ union acpi_object *properties)
+{
+ struct acpi_device_properties *props;
+ union acpi_object *package;
+ size_t alloc_size;
+ unsigned int i;
+ u32 *count;
+
+ if (check_mul_overflow((size_t)properties->package.count,
+ sizeof(*package) + sizeof(void *),
+ &alloc_size) ||
+ check_add_overflow(sizeof(*props) + sizeof(*package), alloc_size,
+ &alloc_size)) {
+ acpi_handle_warn(handle,
+ "can't allocate memory for %u buffer props",
+ properties->package.count);
+ return;
+ }
+
+ props = kvzalloc(alloc_size, GFP_KERNEL);
+ if (!props)
+ return;
+
+ props->guid = &buffer_prop_guid;
+ props->bufs = (void *)(props + 1);
+ props->properties = (void *)(props->bufs + properties->package.count);
+
+ /* Outer package */
+ package = props->properties;
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = package + 1;
+ count = &package->package.count;
+ *count = 0;
+
+ /* Inner packages */
+ package++;
+
+ for (i = 0; i < properties->package.count; i++) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ union acpi_object *property = &properties->package.elements[i];
+ union acpi_object *prop, *obj, *buf_obj;
+ acpi_status status;
+
+ if (property->type != ACPI_TYPE_PACKAGE ||
+ property->package.count != 2) {
+ acpi_handle_warn(handle,
+ "buffer property %u has %u entries\n",
+ i, property->package.count);
+ continue;
+ }
+
+ prop = &property->package.elements[0];
+ obj = &property->package.elements[1];
+
+ if (prop->type != ACPI_TYPE_STRING ||
+ obj->type != ACPI_TYPE_STRING) {
+ acpi_handle_warn(handle,
+ "wrong object types %u and %u\n",
+ prop->type, obj->type);
+ continue;
+ }
+
+ status = acpi_evaluate_object_typed(handle, obj->string.pointer,
+ NULL, &buf,
+ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle,
+ "can't evaluate \"%*pE\" as buffer\n",
+ obj->string.length,
+ obj->string.pointer);
+ continue;
+ }
+
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = prop;
+ package->package.count = 2;
+
+ buf_obj = buf.pointer;
+
+ /* Replace the string object with a buffer object */
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = buf_obj->buffer.length;
+ obj->buffer.pointer = buf_obj->buffer.pointer;
+
+ props->bufs[i] = buf.pointer;
+ package++;
+ (*count)++;
+ }
+
+ if (*count)
+ list_add(&props->list, &data->properties);
+ else
+ kvfree(props);
+}
+
+static bool acpi_extract_properties(acpi_handle scope, union acpi_object *desc,
struct acpi_device_data *data)
{
int i;
@@ -350,7 +490,8 @@ static bool acpi_extract_properties(const union acpi_object *desc,
/* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *properties;
+ const union acpi_object *guid;
+ union acpi_object *properties;
guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
@@ -364,6 +505,12 @@ static bool acpi_extract_properties(const union acpi_object *desc,
properties->type != ACPI_TYPE_PACKAGE)
break;
+ if (guid_equal((guid_t *)guid->buffer.pointer,
+ &buffer_prop_guid)) {
+ acpi_data_add_buffer_props(scope, data, properties);
+ continue;
+ }
+
if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
@@ -410,7 +557,7 @@ void acpi_init_properties(struct acpi_device *adev)
if (ACPI_FAILURE(status))
goto out;
- if (acpi_extract_properties(buf.pointer, &adev->data)) {
+ if (acpi_extract_properties(adev->handle, buf.pointer, &adev->data)) {
adev->data.pointer = buf.pointer;
if (acpi_of)
acpi_init_of_compatible(adev);
@@ -422,6 +569,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (!adev->data.pointer) {
acpi_handle_debug(adev->handle, "Invalid _DSD data, skipping\n");
ACPI_FREE(buf.pointer);
+ } else {
+ if (!acpi_tie_nondev_subnodes(&adev->data))
+ acpi_untie_nondev_subnodes(&adev->data);
}
out:
@@ -438,8 +588,14 @@ static void acpi_free_device_properties(struct list_head *list)
struct acpi_device_properties *props, *tmp;
list_for_each_entry_safe(props, tmp, list, list) {
+ u32 i;
+
list_del(&props->list);
- kfree(props);
+ /* Buffer data properties were separately allocated */
+ if (props->bufs)
+ for (i = 0; i < props->properties->package.count; i++)
+ ACPI_FREE(props->bufs[i]);
+ kvfree(props);
}
}
@@ -462,6 +618,7 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
void acpi_free_properties(struct acpi_device *adev)
{
+ acpi_untie_nondev_subnodes(&adev->data);
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
@@ -633,6 +790,58 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static int acpi_get_ref_args(struct fwnode_reference_args *args,
+ struct fwnode_handle *ref_fwnode,
+ const union acpi_object **element,
+ const union acpi_object *end, size_t num_args)
+{
+ u32 nargs = 0, i;
+
+ /*
+ * Find the referred data extension node under the
+ * referred device node.
+ */
+ for (; *element < end && (*element)->type == ACPI_TYPE_STRING;
+ (*element)++) {
+ const char *child_name = (*element)->string.pointer;
+
+ ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name);
+ if (!ref_fwnode)
+ return -EINVAL;
+ }
+
+ /*
+ * Assume the following integer elements are all args. Stop counting on
+	 * the first reference or the end of the package arguments. If neither
+	 * a reference nor an integer is found, return an error; we can't parse it.
+ */
+ for (i = 0; (*element) + i < end && i < num_args; i++) {
+ acpi_object_type type = (*element)[i].type;
+
+ if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else
+ return -EINVAL;
+ }
+
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
+ if (args) {
+ args->fwnode = ref_fwnode;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = (*element)[i].integer.value;
+ }
+
+ (*element) += nargs;
+
+ return 0;
+}
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -686,11 +895,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -EINVAL ? -ENOENT : -EINVAL;
- /*
- * The simplest case is when the value is a single reference. Just
- * return that reference then.
- */
- if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ switch (obj->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /* Plain single reference without arguments. */
if (index)
return -ENOENT;
@@ -701,19 +908,21 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * If it is not a single reference, then it is a package of
+ * references followed by number of ints as follows:
+ *
+ * Package () { REF, INT, REF, INT, INT }
+ *
+ * The index argument is then used to determine which reference
+ * the caller wants (along with the arguments).
+ */
+ break;
+ default:
+ return -EINVAL;
}
- /*
- * If it is not a single reference, then it is a package of
- * references followed by number of ints as follows:
- *
- * Package () { REF, INT, REF, INT, INT }
- *
- * The index argument is then used to determine which reference
- * the caller wants (along with the arguments).
- */
- if (obj->type != ACPI_TYPE_PACKAGE)
- return -EINVAL;
if (index >= obj->package.count)
return -ENOENT;
@@ -721,66 +930,30 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
end = element + obj->package.count;
while (element < end) {
- u32 nargs, i;
-
- if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
- struct fwnode_handle *ref_fwnode;
-
+ switch (element->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
device = acpi_fetch_acpi_dev(element->reference.handle);
if (!device)
return -EINVAL;
- nargs = 0;
element++;
- /*
- * Find the referred data extension node under the
- * referred device node.
- */
- for (ref_fwnode = acpi_fwnode_handle(device);
- element < end && element->type == ACPI_TYPE_STRING;
- element++) {
- ref_fwnode = acpi_fwnode_get_named_child_node(
- ref_fwnode, element->string.pointer);
- if (!ref_fwnode)
- return -EINVAL;
- }
-
- /*
- * Assume the following integer elements are all args.
- * Stop counting on the first reference or end of the
- * package arguments. In case of neither reference,
- * nor integer, return an error, we can't parse it.
- */
- for (i = 0; element + i < end && i < num_args; i++) {
- int type = element[i].type;
-
- if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else
- return -EINVAL;
- }
-
- if (nargs > NR_FWNODE_REFERENCE_ARGS)
- return -EINVAL;
-
- if (idx == index) {
- args->fwnode = ref_fwnode;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ ret = acpi_get_ref_args(idx == index ? args : NULL,
+ acpi_fwnode_handle(device),
+ &element, end, num_args);
+ if (ret < 0)
+ return ret;
+ if (idx == index)
return 0;
- }
- element += nargs;
- } else if (element->type == ACPI_TYPE_INTEGER) {
+ break;
+ case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
element++;
- } else {
+ break;
+ default:
return -EINVAL;
}
@@ -852,67 +1025,36 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
return ret;
}
-static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val,
- size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U8_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u16(const union acpi_object *items,
- u16 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U16_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u32(const union acpi_object *items,
- u32 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U32_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u64(const union acpi_object *items,
- u64 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
+#define acpi_copy_property_array_uint(items, val, nval) \
+ ({ \
+ typeof(items) __items = items; \
+ typeof(val) __val = val; \
+ typeof(nval) __nval = nval; \
+ size_t i; \
+ int ret = 0; \
+ \
+ for (i = 0; i < __nval; i++) { \
+ if (__items->type == ACPI_TYPE_BUFFER) { \
+ __val[i] = __items->buffer.pointer[i]; \
+ continue; \
+ } \
+ if (__items[i].type != ACPI_TYPE_INTEGER) { \
+ ret = -EPROTO; \
+ break; \
+ } \
+ if (__items[i].integer.value > _Generic(__val, \
+ u8 *: U8_MAX, \
+ u16 *: U16_MAX, \
+ u32 *: U32_MAX, \
+ u64 *: U64_MAX)) { \
+ ret = -EOVERFLOW; \
+ break; \
+ } \
+ \
+ __val[i] = __items[i].integer.value; \
+ } \
+ ret; \
+ })
static int acpi_copy_property_array_string(const union acpi_object *items,
char **val, size_t nval)
@@ -954,31 +1096,54 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
+ if (ret && proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_BUFFER,
+ &obj);
if (ret)
return ret;
- if (!val)
+ if (!val) {
+ if (obj->type == ACPI_TYPE_BUFFER)
+ return obj->buffer.length;
+
return obj->package.count;
+ }
- if (proptype != DEV_PROP_STRING && nval > obj->package.count)
- return -EOVERFLOW;
+ switch (proptype) {
+ case DEV_PROP_STRING:
+ break;
+ case DEV_PROP_U8 ... DEV_PROP_U64:
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (nval > obj->buffer.length)
+ return -EOVERFLOW;
+ break;
+ }
+ fallthrough;
+ default:
+ if (nval > obj->package.count)
+ return -EOVERFLOW;
+ break;
+ }
if (nval == 0)
return -EINVAL;
- items = obj->package.elements;
+ if (obj->type != ACPI_TYPE_BUFFER)
+ items = obj->package.elements;
+ else
+ items = obj;
switch (proptype) {
case DEV_PROP_U8:
- ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u8 *)val, nval);
break;
case DEV_PROP_U16:
- ret = acpi_copy_property_array_u16(items, (u16 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u16 *)val, nval);
break;
case DEV_PROP_U32:
- ret = acpi_copy_property_array_u32(items, (u32 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u32 *)val, nval);
break;
case DEV_PROP_U64:
- ret = acpi_copy_property_array_u64(items, (u64 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
ret = acpi_copy_property_array_string(
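
The acpi_copy_property_array_uint() macro above collapses the four per-width copy helpers it replaces by letting _Generic pick the overflow limit from the destination pointer type at compile time. A stand-alone sketch of that dispatch, with illustrative names rather than the kernel's (the statement-expression form needs GCC or Clang, and the kernel macro's ACPI_TYPE_BUFFER handling is omitted here):

#include <stdint.h>
#include <stdio.h>

/* Select the saturation limit from the destination pointer type. */
#define uint_limit(dst) _Generic((dst),		\
	uint8_t *:  UINT8_MAX,			\
	uint16_t *: UINT16_MAX,			\
	uint32_t *: UINT32_MAX,			\
	uint64_t *: UINT64_MAX)

/* Copy 64-bit source values, rejecting any that overflow the destination. */
#define copy_uint_array(src, dst, n)				\
	({							\
		int __ret = 0;					\
		for (size_t __i = 0; __i < (n); __i++) {	\
			if ((src)[__i] > uint_limit(dst)) {	\
				__ret = -1; /* -EOVERFLOW */	\
				break;				\
			}					\
			(dst)[__i] = (src)[__i];		\
		}						\
		__ret;						\
	})

int main(void)
{
	uint64_t src[] = { 1, 2, 300 };
	uint8_t dst[3];

	/* 300 does not fit in a u8, so the copy reports an overflow. */
	printf("%d\n", copy_uint_array(src, &dst[0], 3));
	return 0;
}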
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b100e6ca9bb4..42cec8120f18 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1722,6 +1722,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
+ {"CLSA0101", },
/*
* Some ACPI devs contain SerialBus resources even though they are not
* attached to a serial bus at all.
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3a9773a09e19..5a7b8065e77f 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -291,6 +291,44 @@ int acpi_get_local_address(acpi_handle handle, u32 *addr)
}
EXPORT_SYMBOL(acpi_get_local_address);
+#define ACPI_MAX_SUB_BUF_SIZE 9
+
+const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ const char *sub;
+ size_t len;
+
+ status = acpi_evaluate_object(handle, METHOD_NAME__SUB, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Reading ACPI _SUB failed: %#x\n", status);
+ return ERR_PTR(-ENODATA);
+ }
+
+ obj = buffer.pointer;
+ if (obj->type == ACPI_TYPE_STRING) {
+ len = strlen(obj->string.pointer);
+ if (len < ACPI_MAX_SUB_BUF_SIZE && len > 0) {
+ sub = kstrdup(obj->string.pointer, GFP_KERNEL);
+ if (!sub)
+ sub = ERR_PTR(-ENOMEM);
+ } else {
+ acpi_handle_err(handle, "ACPI _SUB Length %zu is Invalid\n", len);
+ sub = ERR_PTR(-ENODATA);
+ }
+ } else {
+ acpi_handle_warn(handle, "Warning ACPI _SUB did not return a string\n");
+ sub = ERR_PTR(-ENODATA);
+ }
+
+ acpi_os_free(buffer.pointer);
+
+ return sub;
+}
+EXPORT_SYMBOL_GPL(acpi_get_subsystem_id);
+
acpi_status
acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
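
acpi_get_subsystem_id() above follows a common pattern for ACPI string methods: evaluate the object, check its type, bound the string length, take an owned copy, then free the evaluation buffer. A user-space sketch of just the validation step, with hypothetical names (a _SUB value is at most 8 characters plus the terminator, hence the buffer size of 9):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_SUB_BUF_SIZE 9	/* 8 characters + NUL, as in the kernel */

/* Return an owned copy of a candidate _SUB string, or NULL if invalid. */
static char *dup_subsystem_id(const char *s)
{
	size_t len = strlen(s);

	if (len == 0 || len >= MAX_SUB_BUF_SIZE)
		return NULL;	/* the kernel returns ERR_PTR(-ENODATA) */

	return strdup(s);	/* NULL here maps to ERR_PTR(-ENOMEM) */
}

int main(void)
{
	char *ok = dup_subsystem_id("ABCD1234");
	char *bad = dup_subsystem_id("WAY-TOO-LONG");

	printf("ok=%s bad=%s\n", ok ? ok : "(null)", bad ? bad : "(null)");
	free(ok);
	return 0;
}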
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 647f11cf165d..6132092dab2a 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -88,7 +88,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
return -ENODEV;
}
- fwnode = pdev->dev.fwnode;
+ fwnode = dev_fwnode(&pdev->dev);
if (!fwnode) {
/*
* PCI devices aren't necessarily described by ACPI. Create a
@@ -101,7 +101,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
}
set_primary_fwnode(&pdev->dev, fwnode);
}
- viommu->fwnode = pdev->dev.fwnode;
+ viommu->fwnode = dev_fwnode(&pdev->dev);
pci_dev_put(pdev);
return 0;
}
@@ -314,7 +314,7 @@ static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
return -ENODEV;
/* We're not translating ourself */
- if (viommu->fwnode == dev->fwnode)
+ if (device_match_fwnode(dev, viommu->fwnode))
return -EINVAL;
ops = iommu_ops_from_fwnode(viommu->fwnode);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5649a0371a1f..51f4e1c5cd01 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
if (mm) {
mmap_read_lock(mm);
- vma = alloc->vma;
+ vma = vma_lookup(mm, alloc->vma_addr);
}
if (!vma && need_mm) {
@@ -313,16 +313,22 @@ err_no_vma:
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
- if (vma)
- alloc->vma_vm_mm = vma->vm_mm;
+ unsigned long vm_start = 0;
+
/*
- * If we see alloc->vma is not NULL, buffer data structures set up
- * completely. Look at smp_rmb side binder_alloc_get_vma.
- * We also want to guarantee new alloc->vma_vm_mm is always visible
- * if alloc->vma is set.
+	 * Allow clearing the vma while holding just the read lock, so that
+	 * munmap can downgrade the write lock before freeing and closing the
+	 * file using binder_alloc_vma_close().
*/
- smp_wmb();
- alloc->vma = vma;
+ if (vma) {
+ vm_start = vma->vm_start;
+ alloc->vma_vm_mm = vma->vm_mm;
+ mmap_assert_write_locked(alloc->vma_vm_mm);
+ } else {
+ mmap_assert_locked(alloc->vma_vm_mm);
+ }
+
+ alloc->vma_addr = vm_start;
}
static inline struct vm_area_struct *binder_alloc_get_vma(
@@ -330,11 +336,9 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
{
struct vm_area_struct *vma = NULL;
- if (alloc->vma) {
- /* Look at description in binder_alloc_set_vma */
- smp_rmb();
- vma = alloc->vma;
- }
+ if (alloc->vma_addr)
+ vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+
return vma;
}
@@ -398,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
+ mmap_read_lock(alloc->vma_vm_mm);
if (!binder_alloc_get_vma(alloc)) {
+ mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
+ mmap_read_unlock(alloc->vma_vm_mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -817,7 +824,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
- BUG_ON(alloc->vma);
+ BUG_ON(alloc->vma_addr &&
+ vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -924,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
- }
+
+ mmap_read_lock(alloc->vma_vm_mm);
+ if (binder_alloc_get_vma(alloc) == NULL) {
+ mmap_read_unlock(alloc->vma_vm_mm);
+ goto uninitialized;
}
+
+ mmap_read_unlock(alloc->vma_vm_mm);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+
+uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@@ -1084,7 +1100,7 @@ int binder_alloc_shrinker_init(void)
int ret = list_lru_init(&binder_alloc_lru);
if (ret == 0) {
- ret = register_shrinker(&binder_shrinker);
+ ret = register_shrinker(&binder_shrinker, "android-binder");
if (ret)
list_lru_destroy(&binder_alloc_lru);
}
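
The binder_alloc rework above stops caching the vm_area_struct pointer itself; it stores only the mapping's start address and re-resolves it with vma_lookup() under mmap_lock whenever the VMA is needed, so a torn-down mapping yields NULL instead of a dangling pointer. A user-space analogue of that key-instead-of-pointer pattern (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* A registry whose entries may be torn down at any time. */
struct mapping {
	unsigned long start;
	size_t len;
};

#define NMAPS 4
static struct mapping *table[NMAPS];

/* Re-resolve a cached key; a stale key safely yields NULL. */
static struct mapping *mapping_lookup(unsigned long start)
{
	for (int i = 0; i < NMAPS; i++)
		if (table[i] && table[i]->start == start)
			return table[i];
	return NULL;
}

int main(void)
{
	struct mapping *m = malloc(sizeof(*m));

	m->start = 0x1000;
	m->len = 4096;
	table[0] = m;

	/* Cache the key, not the pointer. */
	unsigned long cached = m->start;

	/* The mapping goes away elsewhere (munmap in the binder case). */
	table[0] = NULL;
	free(m);

	/* Lookup fails cleanly instead of dereferencing freed memory. */
	printf("lookup(0x%lx) = %p\n", cached, (void *)mapping_lookup(cached));
	return 0;
}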
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 7dea57a84c79..1e4fd37af5e0 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,7 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct vm_area_struct *vma;
+ unsigned long vma_addr;
struct mm_struct *vma_vm_mm;
void __user *buffer;
struct list_head buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index c2b323bc3b3a..43a881073a42 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->vma_addr)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ef4508d72c02..7c128c89b454 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9b999c0e8c37..29e2f55c6faa 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1060,6 +1060,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
dev->flags |= ATA_DFLAG_NO_UNLOAD;
/* configure max sectors */
+ dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 03b6ae37a578..6559b606736d 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -683,7 +683,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
struct bcom_task *dmatsk;
/* Get ipb frequency */
- ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 81ce81a75fc6..681cb3786794 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3752,6 +3752,7 @@ static void __exit idt77252_exit(void)
card = idt77252_chain;
dev = card->atmdev;
idt77252_chain = card->next;
+ del_timer_sync(&card->tst_timer);
if (dev->phy->stop)
dev->phy->stop(dev);
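The del_timer_sync() added here matters on module exit: it deactivates tst_timer and also waits for a handler already running on another CPU, so the card structure can be freed safely afterwards. A minimal sketch of the teardown ordering (hypothetical example_teardown(), illustrative only):

#include <linux/timer.h>

static void example_teardown(struct timer_list *t)
{
	/* deactivate the timer and wait for a running handler to finish */
	del_timer_sync(t);
	/* only now is it safe to free the structure embedding *t */
}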
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e19fcab016ba..db1b4b202646 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -248,15 +248,6 @@ config BLK_DEV_NBD
If unsure, say N.
-config BLK_DEV_SX8
- tristate "Promise SATA SX8 support"
- depends on PCI
- help
- Saying Y or M here will enable support for the
- Promise SATA SX8 controllers.
-
- Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
-
config BLK_DEV_RAM
tristate "RAM block device support"
help
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index be631352567e..101612cba303 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -26,8 +26,6 @@ obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
-obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
-
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 603f6828dd79..7d9db33363de 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -974,25 +974,58 @@ static void drbd_bm_endio(struct bio *bio)
}
}
+/* For the layout, see comment above drbd_md_set_sector_offsets(). */
+static inline sector_t drbd_md_last_bitmap_sector(struct drbd_backing_dev *bdev)
+{
+ switch (bdev->md.meta_dev_idx) {
+ case DRBD_MD_INDEX_INTERNAL:
+ case DRBD_MD_INDEX_FLEX_INT:
+ return bdev->md.md_offset + bdev->md.al_offset - 1;
+ case DRBD_MD_INDEX_FLEX_EXT:
+ default:
+ return bdev->md.md_offset + bdev->md.md_size_sect - 1;
+ }
+}
+
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
struct drbd_device *device = ctx->device;
enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
- struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
- GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;
+ struct bio *bio;
struct page *page;
+ sector_t last_bm_sect;
+ sector_t first_bm_sect;
+ sector_t on_disk_sector;
unsigned int len;
- sector_t on_disk_sector =
- device->ldev->md.md_offset + device->ldev->md.bm_offset;
- on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
+ first_bm_sect = device->ldev->md.md_offset + device->ldev->md.bm_offset;
+ on_disk_sector = first_bm_sect + (((sector_t)page_nr) << (PAGE_SHIFT-SECTOR_SHIFT));
/* this might happen with very small
* flexible external meta data device,
* or with PAGE_SIZE > 4k */
- len = min_t(unsigned int, PAGE_SIZE,
- (drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);
+ last_bm_sect = drbd_md_last_bitmap_sector(device->ldev);
+ if (first_bm_sect <= on_disk_sector && last_bm_sect >= on_disk_sector) {
+ sector_t len_sect = last_bm_sect - on_disk_sector + 1;
+ if (len_sect < PAGE_SIZE / SECTOR_SIZE)
+ len = (unsigned int)len_sect * SECTOR_SIZE;
+ else
+ len = PAGE_SIZE;
+ } else {
+ if (__ratelimit(&drbd_ratelimit_state)) {
+ drbd_err(device, "Invalid offset during on-disk bitmap access: "
+ "page idx %u, sector %llu\n", page_nr, on_disk_sector);
+ }
+ ctx->error = -EIO;
+ bm_set_page_io_err(b->bm_pages[page_nr]);
+ if (atomic_dec_and_test(&ctx->in_flight)) {
+ ctx->done = 1;
+ wake_up(&device->misc_wait);
+ kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
+ }
+ return;
+ }
/* serialize IO on this page */
bm_page_lock_io(device, page_nr);
@@ -1007,6 +1040,8 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
+ bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
+ &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
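Worked example for the clamp above, assuming 4 KiB pages and 512-byte sectors: bitmap page N starts at first_bm_sect + 8*N, so only the final page that straddles last_bm_sect is shortened, and any request outside [first_bm_sect, last_bm_sect] is now failed with -EIO instead of writing past the bitmap area. A minimal sketch of the length calculation, mirroring the hunk above (illustrative only):

#include <linux/blkdev.h>
#include <linux/mm.h>

/* Clamp one bitmap page's I/O length to the on-disk bitmap area. */
static unsigned int bm_io_len(sector_t first_bm_sect, sector_t last_bm_sect,
			      unsigned int page_nr)
{
	sector_t sect = first_bm_sect +
			(((sector_t)page_nr) << (PAGE_SHIFT - SECTOR_SHIFT));
	sector_t len_sect = last_bm_sect - sect + 1;

	if (len_sect < PAGE_SIZE / SECTOR_SIZE)
		return (unsigned int)len_sect * SECTOR_SIZE;
	return PAGE_SIZE;
}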
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 6d8dd14458c6..8f7f144e54f3 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1608,7 +1608,7 @@ void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
/*
* what we "blindly" assume:
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e3c0ba93c1a3..ad92192c7d61 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -979,6 +979,11 @@ loop_set_status_from_info(struct loop_device *lo,
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
+
+ /* loff_t vars have been assigned __u64 */
+ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+ return -EOVERFLOW;
+
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
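The new check relies on lo_offset and lo_sizelimit being signed loff_t while the corresponding uapi fields are __u64: a value above LLONG_MAX wraps negative on assignment, so testing for < 0 afterwards detects the overflow without extra casts. A minimal standalone sketch of the idiom (illustrative only):

#include <linux/types.h>
#include <linux/errno.h>

/* Detect a __u64 that does not fit into a signed loff_t. */
static int assign_checked(loff_t *dst, __u64 src)
{
	*dst = src;
	if (*dst < 0)	/* src was above LLONG_MAX and wrapped negative */
		return -EOVERFLOW;
	return 0;
}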
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index f5d098a148cb..2a709daefbc4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -11,6 +11,8 @@
* (part of code stolen from loop.c)
*/
+#define pr_fmt(fmt) "nbd: " fmt
+
#include <linux/major.h>
#include <linux/blkdev.h>
@@ -1950,7 +1952,7 @@ again:
test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
- pr_err("nbd: device at index %d is going down\n",
+ pr_err("device at index %d is going down\n",
index);
return -EINVAL;
}
@@ -1961,7 +1963,7 @@ again:
if (!nbd) {
nbd = nbd_dev_add(index, 2);
if (IS_ERR(nbd)) {
- pr_err("nbd: failed to add new device\n");
+ pr_err("failed to add new device\n");
return PTR_ERR(nbd);
}
}
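The two pr_err() cleanups follow from the pr_fmt define added at the top of the file: once pr_fmt is defined before the printk machinery is pulled in, every pr_*() call in the file gets the "nbd: " prefix automatically, making the hand-written prefixes redundant. Sketch (illustrative only):

/* pr_fmt must be defined before printk.h is (directly or indirectly) included */
#define pr_fmt(fmt) "nbd: " fmt

#include <linux/printk.h>

static void report_going_down(int index)
{
	/* emits: "nbd: device at index <n> is going down" */
	pr_err("device at index %d is going down\n", index);
}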
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 8b224ede2e33..c451c477978f 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -201,6 +201,22 @@ static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static bool g_memory_backed;
+module_param_named(memory_backed, g_memory_backed, bool, 0444);
+MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
+
+static bool g_discard;
+module_param_named(discard, g_discard, bool, 0444);
+MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
+
+static unsigned long g_cache_size;
+module_param_named(cache_size, g_cache_size, ulong, 0444);
+MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+
+static unsigned int g_mbps;
+module_param_named(mbps, g_mbps, uint, 0444);
+MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
+
static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");
@@ -409,6 +425,8 @@ NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
+NULLB_DEVICE_ATTR(no_sched, bool, NULL);
+NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -532,6 +550,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_max_open,
&nullb_device_attr_zone_max_active,
&nullb_device_attr_virt_boundary,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_shared_tag_bitmap,
NULL,
};
@@ -588,7 +608,13 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n");
+ "badblocks,blocking,blocksize,cache_size,"
+ "completion_nsec,discard,home_node,hw_queue_depth,"
+ "irqmode,max_sectors,mbps,memory_backed,no_sched,"
+ "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
+ "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
+ "zone_capacity,zone_max_active,zone_max_open,"
+ "zone_nr_conv,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -650,6 +676,10 @@ static struct nullb_device *null_alloc_dev(void)
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
+ dev->memory_backed = g_memory_backed;
+ dev->discard = g_discard;
+ dev->cache_size = g_cache_size;
+ dev->mbps = g_mbps;
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
dev->zone_size = g_zone_size;
@@ -658,6 +688,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
dev->virt_boundary = g_virt_boundary;
+ dev->no_sched = g_no_sched;
+ dev->shared_tag_bitmap = g_shared_tag_bitmap;
return dev;
}
@@ -1655,7 +1687,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
static void cleanup_queue(struct nullb_queue *nq)
{
- kfree(nq->tag_map);
+ bitmap_free(nq->tag_map);
kfree(nq->cmds);
}
@@ -1782,14 +1814,13 @@ static const struct block_device_operations null_rq_ops = {
static int setup_commands(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
- int i, tag_size;
+ int i;
nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
return -ENOMEM;
- tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
- nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
+ nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
return -ENOMEM;
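bitmap_zalloc(nbits, gfp) performs the longs-per-bitmap sizing that setup_commands() open-coded with ALIGN()/kcalloc(), returns a zeroed map, and pairs with the bitmap_free() now used in cleanup_queue() above. A minimal sketch of the pairing (hypothetical helpers, illustrative only):

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Allocate/free a tag bitmap sized in bits rather than longs. */
static int tag_map_alloc(unsigned long **map, unsigned int depth)
{
	*map = bitmap_zalloc(depth, GFP_KERNEL);
	return *map ? 0 : -ENOMEM;
}

static void tag_map_free(unsigned long *map)
{
	bitmap_free(map);	/* NULL-safe, like kfree() */
}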
@@ -1866,31 +1897,48 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
+ unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
+ int hw_queues, numa_node;
+ unsigned int queue_depth;
int poll_queues;
+ if (nullb) {
+ hw_queues = nullb->dev->submit_queues;
+ poll_queues = nullb->dev->poll_queues;
+ queue_depth = nullb->dev->hw_queue_depth;
+ numa_node = nullb->dev->home_node;
+ if (nullb->dev->no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (nullb->dev->shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (nullb->dev->blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ } else {
+ hw_queues = g_submit_queues;
+ poll_queues = g_poll_queues;
+ queue_depth = g_hw_queue_depth;
+ numa_node = g_home_node;
+ if (g_no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (g_blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ }
+
set->ops = &null_mq_ops;
- set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
- g_submit_queues;
- poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
- if (poll_queues)
- set->nr_hw_queues += poll_queues;
- set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
- g_hw_queue_depth;
- set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- if (g_no_sched)
- set->flags |= BLK_MQ_F_NO_SCHED;
- if (g_shared_tag_bitmap)
- set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ set->flags = flags;
set->driver_data = nullb;
- if (poll_queues)
+ set->nr_hw_queues = hw_queues;
+ set->queue_depth = queue_depth;
+ set->numa_node = numa_node;
+ if (poll_queues) {
+ set->nr_hw_queues += poll_queues;
set->nr_maps = 3;
- else
+ } else {
set->nr_maps = 1;
-
- if ((nullb && nullb->dev->blocking) || g_blocking)
- set->flags |= BLK_MQ_F_BLOCKING;
+ }
return blk_mq_alloc_tag_set(set);
}
@@ -2042,8 +2090,13 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
- dev->index = nullb->index;
+ rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ mutex_unlock(&lock);
+ goto out_cleanup_zone;
+ }
+ nullb->index = rv;
+ dev->index = rv;
mutex_unlock(&lock);
blk_queue_logical_block_size(nullb->q, dev->blocksize);
@@ -2069,7 +2122,7 @@ static int null_add_dev(struct nullb_device *dev)
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_zone;
+ goto out_ida_free;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
@@ -2078,6 +2131,9 @@ static int null_add_dev(struct nullb_device *dev)
pr_info("disk %s created\n", nullb->disk_name);
return 0;
+
+out_ida_free:
+ ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
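Two fixes land in this hunk pair: ida_simple_get() can fail (its negative return was previously stored straight into nullb->index unchecked), and a later gendisk registration failure now unwinds through ida_free() so the allocated index is not leaked. The pattern in isolation (hypothetical example_add(), illustrative only):

#include <linux/idr.h>

static DEFINE_IDA(example_indexes);

static int example_add(void)
{
	int rv, id = ida_simple_get(&example_indexes, 0, 0, GFP_KERNEL);

	if (id < 0)
		return id;		/* e.g. -ENOMEM */

	rv = 0;	/* ... register the device, setting rv on failure ... */
	if (rv) {
		ida_free(&example_indexes, id);	/* unwind the allocation */
		return rv;
	}
	return 0;
}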
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 6fbf0a1b2622..94ff68052b1e 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -113,6 +113,8 @@ struct nullb_device {
bool discard; /* if support discard */
bool zoned; /* if device is zoned */
bool virt_boundary; /* virtual boundary on/off for the device */
+ bool no_sched; /* no IO scheduler for the device */
+ bool shared_tag_bitmap; /* use hostwide shared tags */
};
struct nullb {
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 01a15dbd9cde..4cea3b08087e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2399,7 +2399,7 @@ static void pkt_submit_bio(struct bio *bio)
struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
struct bio *split;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_iter.bi_sector,
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index d1e0fefec90b..e1d080f680ed 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -586,7 +586,7 @@ static void ps3vram_submit_bio(struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0d8ec2fe5740..f9e39301c4af 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1297,7 +1297,7 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req)
dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
__func__, osd_req, obj_req, obj_req->ex.oe_objno,
obj_req->ex.oe_off, obj_req->ex.oe_len);
- ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
+ ceph_osdc_start_request(osd_req->r_osdc, osd_req);
}
/*
@@ -2081,7 +2081,7 @@ static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
if (ret)
return ret;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
return 0;
}
@@ -4768,7 +4768,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
if (ret)
goto out_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0)
ceph_copy_from_page_vector(pages, buf, 0, ret);
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 2be5d87a3ca6..e7c7d9a68168 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -376,7 +376,7 @@ static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
if (ret)
return ret;
- ret = rnbd_clt_resize_disk(dev, (size_t)sectors);
+ ret = rnbd_clt_resize_disk(dev, sectors);
if (ret)
return ret;
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b8d9e2824d9c..04da33a22ef4 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -68,39 +68,18 @@ static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
return refcount_inc_not_zero(&dev->refcount);
}
-static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
- const struct rnbd_msg_open_rsp *rsp)
+static void rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
+ sector_t new_nsectors)
{
- struct rnbd_clt_session *sess = dev->sess;
-
- if (!rsp->logical_block_size)
- return -EINVAL;
-
- dev->device_id = le32_to_cpu(rsp->device_id);
- dev->nsectors = le64_to_cpu(rsp->nsectors);
- dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
- dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
- dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
- dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
- dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
- dev->secure_discard = le16_to_cpu(rsp->secure_discard);
- dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
- dev->fua = !!(rsp->cache_policy & RNBD_FUA);
-
- dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
- dev->max_segments = sess->max_segments;
-
- return 0;
-}
+ if (get_capacity(dev->gd) == new_nsectors)
+ return;
-static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
- size_t new_nsectors)
-{
- rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
- dev->nsectors, new_nsectors);
- dev->nsectors = new_nsectors;
- set_capacity_and_notify(dev->gd, dev->nsectors);
- return 0;
+ /*
+ * The size changed, so the capacity must be revalidated
+ */
+ rnbd_clt_info(dev, "Device size changed from %llu to %llu sectors\n",
+ get_capacity(dev->gd), new_nsectors);
+ set_capacity_and_notify(dev->gd, new_nsectors);
}
static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
@@ -119,19 +98,16 @@ static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
u64 nsectors = le64_to_cpu(rsp->nsectors);
- /*
- * If the device was remapped and the size changed in the
- * meantime we need to revalidate it
- */
- if (dev->nsectors != nsectors)
- rnbd_clt_change_capacity(dev, nsectors);
+ rnbd_clt_change_capacity(dev, nsectors);
gd_kobj = &disk_to_dev(dev->gd)->kobj;
kobject_uevent(gd_kobj, KOBJ_ONLINE);
rnbd_clt_info(dev, "Device online, device remapped successfully\n");
}
- err = rnbd_clt_set_dev_attr(dev, rsp);
- if (err)
+ if (!rsp->logical_block_size) {
+ err = -EINVAL;
goto out;
+ }
+ dev->device_id = le32_to_cpu(rsp->device_id);
dev->dev_state = DEV_STATE_MAPPED;
out:
@@ -140,7 +116,7 @@ out:
return err;
}
-int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize)
{
int ret = 0;
@@ -150,7 +126,7 @@ int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
ret = -ENOENT;
goto out;
}
- ret = rnbd_clt_change_capacity(dev, newsize);
+ rnbd_clt_change_capacity(dev, newsize);
out:
mutex_unlock(&dev->lock);
@@ -507,6 +483,11 @@ static void msg_open_conf(struct work_struct *work)
struct rnbd_msg_open_rsp *rsp = iu->buf;
struct rnbd_clt_dev *dev = iu->dev;
int errno = iu->errno;
+ bool from_map = false;
+
+ /* INIT state is only triggered from rnbd_clt_map_device */
+ if (dev->dev_state == DEV_STATE_INIT)
+ from_map = true;
if (errno) {
rnbd_clt_err(dev,
@@ -523,7 +504,9 @@ static void msg_open_conf(struct work_struct *work)
send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT);
}
}
- kfree(rsp);
+ /* We free rsp in rnbd_clt_map_device for the map scenario */
+ if (!from_map)
+ kfree(rsp);
wake_up_iu_comp(iu, errno);
rnbd_put_iu(dev->sess, iu);
rnbd_clt_put_dev(dev);
@@ -942,7 +925,7 @@ static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
{
struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
- if (dev->read_only && (mode & FMODE_WRITE))
+ if (get_disk_ro(dev->gd) && (mode & FMODE_WRITE))
return -EPERM;
if (dev->dev_state == DEV_STATE_UNMAPPED ||
@@ -963,10 +946,10 @@ static int rnbd_client_getgeo(struct block_device *block_device,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev;
+ struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct queue_limits *limit = &dev->queue->limits;
- dev = block_device->bd_disk->private_data;
- size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
+ size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
geo->cylinders = size >> 6; /* size/64 */
geo->heads = 4;
geo->sectors = 16;
@@ -1350,11 +1333,15 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
}
}
-static void setup_request_queue(struct rnbd_clt_dev *dev)
+static void setup_request_queue(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
{
- blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
- blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
- blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
+ blk_queue_logical_block_size(dev->queue,
+ le16_to_cpu(rsp->logical_block_size));
+ blk_queue_physical_block_size(dev->queue,
+ le16_to_cpu(rsp->physical_block_size));
+ blk_queue_max_hw_sectors(dev->queue,
+ dev->sess->max_io_size / SECTOR_SIZE);
/*
* we don't support discards to "discontiguous" segments
@@ -1362,21 +1349,27 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
*/
blk_queue_max_discard_segments(dev->queue, 1);
- blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
- dev->queue->limits.discard_granularity = dev->discard_granularity;
- dev->queue->limits.discard_alignment = dev->discard_alignment;
- if (dev->secure_discard)
+ blk_queue_max_discard_sectors(dev->queue,
+ le32_to_cpu(rsp->max_discard_sectors));
+ dev->queue->limits.discard_granularity =
+ le32_to_cpu(rsp->discard_granularity);
+ dev->queue->limits.discard_alignment =
+ le32_to_cpu(rsp->discard_alignment);
+ if (le16_to_cpu(rsp->secure_discard))
blk_queue_max_secure_erase_sectors(dev->queue,
- dev->max_discard_sectors);
+ le32_to_cpu(rsp->max_discard_sectors));
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
- blk_queue_max_segments(dev->queue, dev->max_segments);
+ blk_queue_max_segments(dev->queue, dev->sess->max_segments);
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
- blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
+ blk_queue_write_cache(dev->queue,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
}
-static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp, int idx)
{
int err;
@@ -1388,19 +1381,15 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
idx);
- pr_debug("disk_name=%s, capacity=%zu\n",
+ pr_debug("disk_name=%s, capacity=%llu\n",
dev->gd->disk_name,
- dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
- );
+ le64_to_cpu(rsp->nsectors) *
+ (le16_to_cpu(rsp->logical_block_size) / SECTOR_SIZE));
- set_capacity(dev->gd, dev->nsectors);
+ set_capacity(dev->gd, le64_to_cpu(rsp->nsectors));
- if (dev->access_mode == RNBD_ACCESS_RO) {
- dev->read_only = true;
+ if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
- } else {
- dev->read_only = false;
- }
/*
* Network device does not need rotational
@@ -1413,11 +1402,13 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
return err;
}
-static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
+static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
{
int idx = dev->clt_device_id;
- dev->size = dev->nsectors * dev->logical_block_size;
+ dev->size = le64_to_cpu(rsp->nsectors) *
+ le16_to_cpu(rsp->logical_block_size);
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
if (IS_ERR(dev->gd))
@@ -1425,8 +1416,8 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
- setup_request_queue(dev);
- return rnbd_clt_setup_gen_disk(dev, idx);
+ setup_request_queue(dev, rsp);
+ return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
@@ -1562,7 +1553,14 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
{
struct rnbd_clt_session *sess;
struct rnbd_clt_dev *dev;
- int ret;
+ int ret, errno;
+ struct rnbd_msg_open_rsp *rsp;
+ struct rnbd_msg_open msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
if (exists_devpath(pathname, sessname))
return ERR_PTR(-EEXIST);
@@ -1582,17 +1580,47 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
ret = -EEXIST;
goto put_dev;
}
- ret = send_msg_open(dev, RTRS_PERMIT_WAIT);
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp) {
+ ret = -ENOMEM;
+ goto del_dev;
+ }
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ ret = -ENOMEM;
+ kfree(rsp);
+ goto del_dev;
+ }
+ iu->buf = rsp;
+ iu->dev = dev;
+ sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
+ msg.access_mode = dev->access_mode;
+ strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ ret = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, sizeof(*rsp), iu->sgt.sgl, 1,
+ msg_open_conf, &errno, RTRS_PERMIT_WAIT);
+ if (ret) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ } else {
+ ret = errno;
+ }
if (ret) {
rnbd_clt_err(dev,
"map_device: failed, can't open remote device, err: %d\n",
ret);
- goto del_dev;
+ goto put_iu;
}
mutex_lock(&dev->lock);
pr_debug("Opened remote device: session=%s, path='%s'\n",
sess->sessname, pathname);
- ret = rnbd_client_setup_device(dev);
+ ret = rnbd_client_setup_device(dev, rsp);
if (ret) {
rnbd_clt_err(dev,
"map_device: Failed to configure device, err: %d\n",
@@ -1602,21 +1630,30 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
- dev->gd->disk_name, dev->nsectors,
- dev->logical_block_size, dev->physical_block_size,
- dev->max_discard_sectors,
- dev->discard_granularity, dev->discard_alignment,
- dev->secure_discard, dev->max_segments,
- dev->max_hw_sectors, dev->wc, dev->fua);
+ "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+ dev->gd->disk_name, le64_to_cpu(rsp->nsectors),
+ le16_to_cpu(rsp->logical_block_size),
+ le16_to_cpu(rsp->physical_block_size),
+ le32_to_cpu(rsp->max_discard_sectors),
+ le32_to_cpu(rsp->discard_granularity),
+ le32_to_cpu(rsp->discard_alignment),
+ le16_to_cpu(rsp->secure_discard),
+ sess->max_segments, sess->max_io_size / SECTOR_SIZE,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
mutex_unlock(&dev->lock);
+ kfree(rsp);
+ rnbd_put_iu(sess, iu);
rnbd_clt_put_sess(sess);
return dev;
send_close:
send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
+put_iu:
+ kfree(rsp);
+ rnbd_put_iu(sess, iu);
del_dev:
delete_dev(dev);
put_dev:
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 2e2e8c4a85c1..a48e040abe63 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -106,6 +106,7 @@ struct rnbd_queue {
};
struct rnbd_clt_dev {
+ struct kobject kobj;
struct rnbd_clt_session *sess;
struct request_queue *queue;
struct rnbd_queue *hw_queues;
@@ -114,27 +115,14 @@ struct rnbd_clt_dev {
u32 clt_device_id;
struct mutex lock;
enum rnbd_clt_dev_state dev_state;
+ refcount_t refcount;
char *pathname;
enum rnbd_access_mode access_mode;
u32 nr_poll_queues;
- bool read_only;
- bool wc;
- bool fua;
- u32 max_hw_sectors;
- u32 max_discard_sectors;
- u32 discard_granularity;
- u32 discard_alignment;
- u16 secure_discard;
- u16 physical_block_size;
- u16 logical_block_size;
- u16 max_segments;
- size_t nsectors;
u64 size; /* device size in bytes */
struct list_head list;
struct gendisk *gd;
- struct kobject kobj;
char *blk_symlink_name;
- refcount_t refcount;
struct work_struct unmap_on_rmmod_work;
};
@@ -150,7 +138,7 @@ int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
const struct attribute *sysfs_self);
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
-int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize);
/* rnbd-clt-sysfs.c */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 0713014bf423..5e08da277ddf 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -224,7 +224,6 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
wait_for_completion(&dc); /* wait for inflights to drop to zero */
rnbd_dev_close(sess_dev->rnbd_dev);
- list_del(&sess_dev->sess_list);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (sess_dev->open_flags & FMODE_WRITE)
@@ -239,14 +238,14 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
static void destroy_sess(struct rnbd_srv_session *srv_sess)
{
- struct rnbd_srv_sess_dev *sess_dev, *tmp;
+ struct rnbd_srv_sess_dev *sess_dev;
+ unsigned long index;
- if (list_empty(&srv_sess->sess_dev_list))
+ if (xa_empty(&srv_sess->index_idr))
goto out;
mutex_lock(&srv_sess->lock);
- list_for_each_entry_safe(sess_dev, tmp, &srv_sess->sess_dev_list,
- sess_list)
+ xa_for_each(&srv_sess->index_idr, index, sess_dev)
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&srv_sess->lock);
@@ -281,7 +280,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
- INIT_LIST_HEAD(&srv_sess->sess_dev_list);
mutex_init(&srv_sess->lock);
mutex_lock(&sess_lock);
list_add(&srv_sess->list, &sess_list);
@@ -323,10 +321,11 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
{
struct rnbd_srv_session *sess = sess_dev->sess;
- sess_dev->keep_id = true;
/* It is already started to close by client's close message. */
if (!mutex_trylock(&sess->lock))
return;
+
+ sess_dev->keep_id = true;
/* first remove sysfs itself to avoid deadlock */
sysfs_remove_file_self(&sess_dev->kobj, &attr->attr);
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
@@ -666,11 +665,12 @@ static struct rnbd_srv_sess_dev *
find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
{
struct rnbd_srv_sess_dev *sess_dev;
+ unsigned long index;
- if (list_empty(&srv_sess->sess_dev_list))
+ if (xa_empty(&srv_sess->index_idr))
return NULL;
- list_for_each_entry(sess_dev, &srv_sess->sess_dev_list, sess_list)
+ xa_for_each(&srv_sess->index_idr, index, sess_dev)
if (!strcmp(sess_dev->pathname, dev_name))
return sess_dev;
@@ -780,8 +780,6 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
mutex_unlock(&srv_dev->lock);
- list_add(&srv_sess_dev->sess_list, &srv_sess->sess_dev_list);
-
rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);
kfree(full_path);
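The sess_dev_list removal is safe because every sess_dev is already stored in srv_sess->index_idr, an XArray keyed by device id, so xa_empty() and xa_for_each() take over both the emptiness test and the iteration with no parallel bookkeeping to keep in sync. A minimal sketch of the iteration with generic types (illustrative only):

#include <linux/xarray.h>
#include <linux/string.h>

struct item {
	char name[32];
};

/* Walk every present entry of an XArray. */
static struct item *find_by_name(struct xarray *xa, const char *name)
{
	struct item *it;
	unsigned long index;

	xa_for_each(xa, index, it)
		if (!strcmp(it->name, name))
			return it;

	return NULL;
}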
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 6926f9069dc4..081bceaf4ae9 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -25,8 +25,6 @@ struct rnbd_srv_session {
int queue_depth;
struct xarray index_idr;
- /* List of struct rnbd_srv_sess_dev */
- struct list_head sess_dev_list;
struct mutex lock;
u8 ver;
};
@@ -48,8 +46,6 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- /* Entry inside rnbd_srv_session struct */
- struct list_head sess_list;
struct rnbd_dev *rnbd_dev;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
deleted file mode 100644
index 0e1a484cab0b..000000000000
--- a/drivers/block/sx8.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
- *
- * Copyright 2004-2005 Red Hat, Inc.
- *
- * Author/maintainer: Jeff Garzik <jgarzik@pobox.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/compiler.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/ktime.h>
-#include <linux/hdreg.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#if 0
-#define CARM_DEBUG
-#define CARM_VERBOSE_DEBUG
-#else
-#undef CARM_DEBUG
-#undef CARM_VERBOSE_DEBUG
-#endif
-#undef CARM_NDEBUG
-
-#define DRV_NAME "sx8"
-#define DRV_VERSION "1.0"
-#define PFX DRV_NAME ": "
-
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Promise SATA SX8 block driver");
-MODULE_VERSION(DRV_VERSION);
-
-/*
- * SX8 hardware has a single message queue for all ATA ports.
- * When this driver was written, the hardware (firmware?) would
- * corrupt data eventually, if more than one request was outstanding.
- * As one can imagine, having 8 ports bottlenecking on a single
- * command hurts performance.
- *
- * Based on user reports, later versions of the hardware (firmware?)
- * seem to be able to survive with more than one command queued.
- *
- * Therefore, we default to the safe option -- 1 command -- but
- * allow the user to increase this.
- *
- * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
- * but problems seem to occur when you exceed ~30, even on newer hardware.
- */
-static int max_queue = 1;
-module_param(max_queue, int, 0444);
-MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
-
-
-#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN)
-
-/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
-#define TAG_ENCODE(tag) (((tag) << 16) | 0xf)
-#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f)
-#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
-
-/* note: prints function name for you */
-#ifdef CARM_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef CARM_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* CARM_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* CARM_DEBUG */
-
-#ifdef CARM_NDEBUG
-#define assert(expr)
-#else
-#define assert(expr) \
- if(unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#endif
-
-/* defines only for the constants which don't work well as enums */
-struct carm_host;
-
-enum {
- /* adapter-wide limits */
- CARM_MAX_PORTS = 8,
- CARM_SHM_SIZE = (4096 << 7),
- CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS,
- CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1,
-
- /* command message queue limits */
- CARM_MAX_REQ = 64, /* max command msgs per host */
- CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */
-
- /* S/G limits, host-wide and per-request */
- CARM_MAX_REQ_SG = 32, /* max s/g entries per request */
- CARM_MAX_HOST_SG = 600, /* max s/g entries per host */
- CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */
-
- /* hardware registers */
- CARM_IHQP = 0x1c,
- CARM_INT_STAT = 0x10, /* interrupt status */
- CARM_INT_MASK = 0x14, /* interrupt mask */
- CARM_HMUC = 0x18, /* host message unit control */
- RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */
- RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */
- RBUF_BYTE_SZ = 0x28,
- CARM_RESP_IDX = 0x2c,
- CARM_CMS0 = 0x30, /* command message size reg 0 */
- CARM_LMUC = 0x48,
- CARM_HMPHA = 0x6c,
- CARM_INITC = 0xb5,
-
- /* bits in CARM_INT_{STAT,MASK} */
- INT_RESERVED = 0xfffffff0,
- INT_WATCHDOG = (1 << 3), /* watchdog timer */
- INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */
- INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */
- INT_RESPONSE = (1 << 0), /* response msg available */
- INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW,
- INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW |
- INT_RESPONSE,
-
- /* command messages, and related register bits */
- CARM_HAVE_RESP = 0x01,
- CARM_MSG_READ = 1,
- CARM_MSG_WRITE = 2,
- CARM_MSG_VERIFY = 3,
- CARM_MSG_GET_CAPACITY = 4,
- CARM_MSG_FLUSH = 5,
- CARM_MSG_IOCTL = 6,
- CARM_MSG_ARRAY = 8,
- CARM_MSG_MISC = 9,
- CARM_CME = (1 << 2),
- CARM_RME = (1 << 1),
- CARM_WZBC = (1 << 0),
- CARM_RMI = (1 << 0),
- CARM_Q_FULL = (1 << 3),
- CARM_MSG_SIZE = 288,
- CARM_Q_LEN = 48,
-
- /* CARM_MSG_IOCTL messages */
- CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */
- CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */
- CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */
-
- IOC_SCAN_CHAN_NODEV = 0x1f,
- IOC_SCAN_CHAN_OFFSET = 0x40,
-
- /* CARM_MSG_ARRAY messages */
- CARM_ARRAY_INFO = 0,
-
- ARRAY_NO_EXIST = (1 << 31),
-
- /* response messages */
- RMSG_SZ = 8, /* sizeof(struct carm_response) */
- RMSG_Q_LEN = 48, /* resp. msg list length */
- RMSG_OK = 1, /* bit indicating msg was successful */
- /* length of entire resp. msg buffer */
- RBUF_LEN = RMSG_SZ * RMSG_Q_LEN,
-
- PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */
-
- /* CARM_MSG_MISC messages */
- MISC_GET_FW_VER = 2,
- MISC_ALLOC_MEM = 3,
- MISC_SET_TIME = 5,
-
- /* MISC_GET_FW_VER feature bits */
- FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */
- FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */
- FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */
-
- /* carm_host flags */
- FL_NON_RAID = FW_VER_NON_RAID,
- FL_4PORT = FW_VER_4PORT,
- FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
- FL_DYN_MAJOR = (1 << 17),
-};
-
-enum {
- CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */
-};
-
-enum scatter_gather_types {
- SGT_32BIT = 0,
- SGT_64BIT = 1,
-};
-
-enum host_states {
- HST_INVALID, /* invalid state; never used */
- HST_ALLOC_BUF, /* setting up master SHM area */
- HST_ERROR, /* we never leave here */
- HST_PORT_SCAN, /* start dev scan */
- HST_DEV_SCAN_START, /* start per-device probe */
- HST_DEV_SCAN, /* continue per-device probe */
- HST_DEV_ACTIVATE, /* activate devices we found */
- HST_PROBE_FINISHED, /* probe is complete */
- HST_PROBE_START, /* initiate probe */
- HST_SYNC_TIME, /* tell firmware what time it is */
- HST_GET_FW_VER, /* get firmware version, adapter port cnt */
-};
-
-#ifdef CARM_DEBUG
-static const char *state_name[] = {
- "HST_INVALID",
- "HST_ALLOC_BUF",
- "HST_ERROR",
- "HST_PORT_SCAN",
- "HST_DEV_SCAN_START",
- "HST_DEV_SCAN",
- "HST_DEV_ACTIVATE",
- "HST_PROBE_FINISHED",
- "HST_PROBE_START",
- "HST_SYNC_TIME",
- "HST_GET_FW_VER",
-};
-#endif
-
-struct carm_port {
- unsigned int port_no;
- struct gendisk *disk;
- struct carm_host *host;
-
- /* attached device characteristics */
- u64 capacity;
- char name[41];
- u16 dev_geom_head;
- u16 dev_geom_sect;
- u16 dev_geom_cyl;
-};
-
-struct carm_request {
- int n_elem;
- unsigned int msg_type;
- unsigned int msg_subtype;
- unsigned int msg_bucket;
- struct scatterlist sg[CARM_MAX_REQ_SG];
-};
-
-struct carm_host {
- unsigned long flags;
- void __iomem *mmio;
- void *shm;
- dma_addr_t shm_dma;
-
- int major;
- int id;
- char name[32];
-
- spinlock_t lock;
- struct pci_dev *pdev;
- unsigned int state;
- u32 fw_ver;
-
- struct blk_mq_tag_set tag_set;
- struct request_queue *oob_q;
- unsigned int n_oob;
-
- unsigned int hw_sg_used;
-
- unsigned int resp_idx;
-
- unsigned int wait_q_prod;
- unsigned int wait_q_cons;
- struct request_queue *wait_q[CARM_MAX_WAIT_Q];
-
- void *msg_base;
- dma_addr_t msg_dma;
-
- int cur_scan_dev;
- unsigned long dev_active;
- unsigned long dev_present;
- struct carm_port port[CARM_MAX_PORTS];
-
- struct work_struct fsm_task;
-
- int probe_err;
- struct completion probe_comp;
-};
-
-struct carm_response {
- __le32 ret_handle;
- __le32 status;
-} __attribute__((packed));
-
-struct carm_msg_sg {
- __le32 start;
- __le32 len;
-} __attribute__((packed));
-
-struct carm_msg_rw {
- u8 type;
- u8 id;
- u8 sg_count;
- u8 sg_type;
- __le32 handle;
- __le32 lba;
- __le16 lba_count;
- __le16 lba_high;
- struct carm_msg_sg sg[32];
-} __attribute__((packed));
-
-struct carm_msg_allocbuf {
- u8 type;
- u8 subtype;
- u8 n_sg;
- u8 sg_type;
- __le32 handle;
- __le32 addr;
- __le32 len;
- __le32 evt_pool;
- __le32 n_evt;
- __le32 rbuf_pool;
- __le32 n_rbuf;
- __le32 msg_pool;
- __le32 n_msg;
- struct carm_msg_sg sg[8];
-} __attribute__((packed));
-
-struct carm_msg_ioctl {
- u8 type;
- u8 subtype;
- u8 array_id;
- u8 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_msg_sync_time {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- u32 reserved2;
- __le32 timestamp;
-} __attribute__((packed));
-
-struct carm_msg_get_fw_ver {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_fw_ver {
- __le32 version;
- u8 features;
- u8 reserved1;
- u16 reserved2;
-} __attribute__((packed));
-
-struct carm_array_info {
- __le32 size;
-
- __le16 size_hi;
- __le16 stripe_size;
-
- __le32 mode;
-
- __le16 stripe_blk_sz;
- __le16 reserved1;
-
- __le16 cyl;
- __le16 head;
-
- __le16 sect;
- u8 array_id;
- u8 reserved2;
-
- char name[40];
-
- __le32 array_status;
-
- /* device list continues beyond this point? */
-} __attribute__((packed));
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static void carm_remove_one (struct pci_dev *pdev);
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static const struct pci_device_id carm_pci_tbl[] = {
- { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, carm_pci_tbl);
-
-static struct pci_driver carm_driver = {
- .name = DRV_NAME,
- .id_table = carm_pci_tbl,
- .probe = carm_init_one,
- .remove = carm_remove_one,
-};
-
-static const struct block_device_operations carm_bd_ops = {
- .owner = THIS_MODULE,
- .getgeo = carm_bdev_getgeo,
-};
-
-static unsigned int carm_host_id;
-static unsigned long carm_major_alloc;
-
-
-
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct carm_port *port = bdev->bd_disk->private_data;
-
- geo->heads = (u8) port->dev_geom_head;
- geo->sectors = (u8) port->dev_geom_sect;
- geo->cylinders = port->dev_geom_cyl;
- return 0;
-}
-
-static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
-
-static inline int carm_lookup_bucket(u32 msg_size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- if (msg_size <= msg_sizes[i])
- return i;
-
- return -ENOENT;
-}
-
-static void carm_init_buckets(void __iomem *mmio)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
-}
-
-static inline void *carm_ref_msg(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_base + (msg_idx * CARM_MSG_SIZE);
-}
-
-static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
-}
-
-static int carm_send_msg(struct carm_host *host,
- struct carm_request *crq, unsigned tag)
-{
- void __iomem *mmio = host->mmio;
- u32 msg = (u32) carm_ref_msg_dma(host, tag);
- u32 cm_bucket = crq->msg_bucket;
- u32 tmp;
- int rc = 0;
-
- VPRINTK("ENTER\n");
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_Q_FULL) {
-#if 0
- tmp = readl(mmio + CARM_INT_MASK);
- tmp |= INT_Q_AVAILABLE;
- writel(tmp, mmio + CARM_INT_MASK);
- readl(mmio + CARM_INT_MASK); /* flush */
-#endif
- DPRINTK("host msg queue full\n");
- rc = -EBUSY;
- } else {
- writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
- readl(mmio + CARM_IHQP); /* flush */
- }
-
- return rc;
-}
-
-static int carm_array_info (struct carm_host *host, unsigned int array_idx)
-{
- struct carm_msg_ioctl *ioc;
- u32 msg_data;
- dma_addr_t msg_dma;
- struct carm_request *crq;
- struct request *rq;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq)) {
- rc = -ENOMEM;
- goto err_out;
- }
- crq = blk_mq_rq_to_pdu(rq);
-
- ioc = carm_ref_msg(host, rq->tag);
- msg_dma = carm_ref_msg_dma(host, rq->tag);
- msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));
-
- crq->msg_type = CARM_MSG_ARRAY;
- crq->msg_subtype = CARM_ARRAY_INFO;
- rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
- sizeof(struct carm_array_info));
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_ARRAY;
- ioc->subtype = CARM_ARRAY_INFO;
- ioc->array_id = (u8) array_idx;
- ioc->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- spin_lock_irq(&host->lock);
- assert(host->state == HST_DEV_SCAN_START ||
- host->state == HST_DEV_SCAN);
- spin_unlock_irq(&host->lock);
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true);
-
- return 0;
-
-err_out:
- spin_lock_irq(&host->lock);
- host->state = HST_ERROR;
- spin_unlock_irq(&host->lock);
- return rc;
-}
-
-typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);
-
-static int carm_send_special (struct carm_host *host, carm_sspc_t func)
-{
- struct request *rq;
- struct carm_request *crq;
- struct carm_msg_ioctl *ioc;
- void *mem;
- unsigned int msg_size;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
- crq = blk_mq_rq_to_pdu(rq);
-
- mem = carm_ref_msg(host, rq->tag);
-
- msg_size = func(host, rq->tag, mem);
-
- ioc = mem;
- crq->msg_type = ioc->type;
- crq->msg_subtype = ioc->subtype;
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true);
-
- return 0;
-}
-
-static unsigned int carm_fill_sync_time(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_sync_time *st = mem;
-
- time64_t tv = ktime_get_real_seconds();
-
- memset(st, 0, sizeof(*st));
- st->type = CARM_MSG_MISC;
- st->subtype = MISC_SET_TIME;
- st->handle = cpu_to_le32(TAG_ENCODE(idx));
- st->timestamp = cpu_to_le32(tv);
-
- return sizeof(struct carm_msg_sync_time);
-}
-
-static unsigned int carm_fill_alloc_buf(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_allocbuf *ab = mem;
-
- memset(ab, 0, sizeof(*ab));
- ab->type = CARM_MSG_MISC;
- ab->subtype = MISC_ALLOC_MEM;
- ab->handle = cpu_to_le32(TAG_ENCODE(idx));
- ab->n_sg = 1;
- ab->sg_type = SGT_32BIT;
- ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1);
- ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024));
- ab->n_evt = cpu_to_le32(1024);
- ab->rbuf_pool = cpu_to_le32(host->shm_dma);
- ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN);
- ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN);
- ab->n_msg = cpu_to_le32(CARM_Q_LEN);
- ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->sg[0].len = cpu_to_le32(65536);
-
- return sizeof(struct carm_msg_allocbuf);
-}
-
-static unsigned int carm_fill_scan_channels(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_ioctl *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
- IOC_SCAN_CHAN_OFFSET);
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_IOCTL;
- ioc->subtype = CARM_IOC_SCAN_CHAN;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- /* fill output data area with "no device" default values */
- mem += IOC_SCAN_CHAN_OFFSET;
- memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);
-
- return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
-}
-
-static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_get_fw_ver *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_MISC;
- ioc->subtype = MISC_GET_FW_VER;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- return sizeof(struct carm_msg_get_fw_ver) +
- sizeof(struct carm_fw_ver);
-}
-
-static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
-{
- unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
-
- blk_mq_stop_hw_queues(q);
- VPRINTK("STOPPED QUEUE %p\n", q);
-
- host->wait_q[idx] = q;
- host->wait_q_prod++;
- BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
-}
-
-static inline struct request_queue *carm_pop_q(struct carm_host *host)
-{
- unsigned int idx;
-
- if (host->wait_q_prod == host->wait_q_cons)
- return NULL;
-
- idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
- host->wait_q_cons++;
-
- return host->wait_q[idx];
-}
-
-static inline void carm_round_robin(struct carm_host *host)
-{
- struct request_queue *q = carm_pop_q(host);
- if (q) {
- blk_mq_start_hw_queues(q);
- VPRINTK("STARTED QUEUE %p\n", q);
- }
-}
-
-static inline enum dma_data_direction carm_rq_dir(struct request *rq)
-{
- return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-}
-
-static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct request_queue *q = hctx->queue;
- struct request *rq = bd->rq;
- struct carm_port *port = q->queuedata;
- struct carm_host *host = port->host;
- struct carm_request *crq = blk_mq_rq_to_pdu(rq);
- struct carm_msg_rw *msg;
- struct scatterlist *sg;
- int i, n_elem = 0, rc;
- unsigned int msg_size;
- u32 tmp;
-
- crq->n_elem = 0;
- sg_init_table(crq->sg, CARM_MAX_REQ_SG);
-
- blk_mq_start_request(rq);
-
- spin_lock_irq(&host->lock);
- if (req_op(rq) == REQ_OP_DRV_OUT)
- goto send_msg;
-
- /* get scatterlist from block layer */
- sg = &crq->sg[0];
- n_elem = blk_rq_map_sg(q, rq, sg);
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* map scatterlist to PCI bus addresses */
- n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* obey global hardware limit on S/G entries */
- if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
- goto out_resource;
-
- crq->n_elem = n_elem;
- host->hw_sg_used += n_elem;
-
- /*
- * build read/write message
- */
-
- VPRINTK("build msg\n");
- msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);
-
- if (rq_data_dir(rq) == WRITE) {
- msg->type = CARM_MSG_WRITE;
- crq->msg_type = CARM_MSG_WRITE;
- } else {
- msg->type = CARM_MSG_READ;
- crq->msg_type = CARM_MSG_READ;
- }
-
- msg->id = port->port_no;
- msg->sg_count = n_elem;
- msg->sg_type = SGT_32BIT;
- msg->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
- tmp = (blk_rq_pos(rq) >> 16) >> 16;
- msg->lba_high = cpu_to_le16( (u16) tmp );
- msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
-
- msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
- for (i = 0; i < n_elem; i++) {
- struct carm_msg_sg *carm_sg = &msg->sg[i];
- carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
- carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
- msg_size += sizeof(struct carm_msg_sg);
- }
-
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-send_msg:
- /*
- * queue read/write message to hardware
- */
- VPRINTK("send msg, tag == %u\n", rq->tag);
- rc = carm_send_msg(host, crq, rq->tag);
- if (rc) {
- host->hw_sg_used -= n_elem;
- goto out_resource;
- }
-
- spin_unlock_irq(&host->lock);
- return BLK_STS_OK;
-out_resource:
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
- carm_push_q(host, q);
- spin_unlock_irq(&host->lock);
- return BLK_STS_DEV_RESOURCE;
-out_ioerr:
- carm_round_robin(host);
- spin_unlock_irq(&host->lock);
- return BLK_STS_IOERR;
-}
-
-static void carm_handle_array_info(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- struct carm_port *port;
- u8 *msg_data = mem + sizeof(struct carm_array_info);
- struct carm_array_info *desc = (struct carm_array_info *) msg_data;
- u64 lo, hi;
- int cur_port;
- size_t slen;
-
- DPRINTK("ENTER\n");
-
- if (error)
- goto out;
- if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
- goto out;
-
- cur_port = host->cur_scan_dev;
-
- /* should never occur */
- if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
- printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
- cur_port, (int) desc->array_id);
- goto out;
- }
-
- port = &host->port[cur_port];
-
- lo = (u64) le32_to_cpu(desc->size);
- hi = (u64) le16_to_cpu(desc->size_hi);
-
- port->capacity = lo | (hi << 32);
- port->dev_geom_head = le16_to_cpu(desc->head);
- port->dev_geom_sect = le16_to_cpu(desc->sect);
- port->dev_geom_cyl = le16_to_cpu(desc->cyl);
-
- host->dev_active |= (1 << cur_port);
-
- strncpy(port->name, desc->name, sizeof(port->name));
- port->name[sizeof(port->name) - 1] = 0;
- slen = strlen(port->name);
- while (slen && (port->name[slen - 1] == ' ')) {
- port->name[slen - 1] = 0;
- slen--;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
- pci_name(host->pdev), port->port_no,
- (unsigned long long) port->capacity);
- printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
- pci_name(host->pdev), port->port_no, port->name);
-
-out:
- assert(host->state == HST_DEV_SCAN);
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_scan_chan(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
- unsigned int i, dev_count = 0;
- int new_state = HST_DEV_SCAN_START;
-
- DPRINTK("ENTER\n");
-
- if (error) {
- new_state = HST_ERROR;
- goto out;
- }
-
- /* TODO: scan and support non-disk devices */
- for (i = 0; i < 8; i++)
- if (msg_data[i] == 0) { /* direct-access device (disk) */
- host->dev_present |= (1 << i);
- dev_count++;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
- pci_name(host->pdev), dev_count);
-
-out:
- assert(host->state == HST_PORT_SCAN);
- host->state = new_state;
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_generic(struct carm_host *host,
- struct carm_request *crq, blk_status_t error,
- int cur_state, int next_state)
-{
- DPRINTK("ENTER\n");
-
- assert(host->state == cur_state);
- if (error)
- host->state = HST_ERROR;
- else
- host->state = next_state;
- schedule_work(&host->fsm_task);
-}
-
-static inline void carm_handle_resp(struct carm_host *host,
- __le32 ret_handle_le, u32 status)
-{
- u32 handle = le32_to_cpu(ret_handle_le);
- unsigned int msg_idx;
- struct request *rq;
- struct carm_request *crq;
- blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
- u8 *mem;
-
- VPRINTK("ENTER, handle == 0x%x\n", handle);
-
- if (unlikely(!TAG_VALID(handle))) {
- printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
- pci_name(host->pdev), handle);
- return;
- }
-
- msg_idx = TAG_DECODE(handle);
- VPRINTK("tag == %u\n", msg_idx);
-
- rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
- crq = blk_mq_rq_to_pdu(rq);
-
- /* fast path */
- if (likely(crq->msg_type == CARM_MSG_READ ||
- crq->msg_type == CARM_MSG_WRITE)) {
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
- carm_rq_dir(rq));
- goto done;
- }
-
- mem = carm_ref_msg(host, msg_idx);
-
- switch (crq->msg_type) {
- case CARM_MSG_IOCTL: {
- switch (crq->msg_subtype) {
- case CARM_IOC_SCAN_CHAN:
- carm_handle_scan_chan(host, crq, mem, error);
- goto done;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_MISC: {
- switch (crq->msg_subtype) {
- case MISC_ALLOC_MEM:
- carm_handle_generic(host, crq, error,
- HST_ALLOC_BUF, HST_SYNC_TIME);
- goto done;
- case MISC_SET_TIME:
- carm_handle_generic(host, crq, error,
- HST_SYNC_TIME, HST_GET_FW_VER);
- goto done;
- case MISC_GET_FW_VER: {
- struct carm_fw_ver *ver = (struct carm_fw_ver *)
- (mem + sizeof(struct carm_msg_get_fw_ver));
- if (!error) {
- host->fw_ver = le32_to_cpu(ver->version);
- host->flags |= (ver->features & FL_FW_VER_MASK);
- }
- carm_handle_generic(host, crq, error,
- HST_GET_FW_VER, HST_PORT_SCAN);
- goto done;
- }
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_ARRAY: {
- switch (crq->msg_subtype) {
- case CARM_ARRAY_INFO:
- carm_handle_array_info(host, crq, mem, error);
- break;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- default:
- /* unknown / invalid response */
- goto err_out;
- }
-
- return;
-
-err_out:
- printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
- pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
- error = BLK_STS_IOERR;
-done:
- host->hw_sg_used -= crq->n_elem;
- blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);
-
- if (host->hw_sg_used <= CARM_SG_LOW_WATER)
- carm_round_robin(host);
-}
-
-static inline void carm_handle_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- struct carm_response *resp = (struct carm_response *) host->shm;
- unsigned int work = 0;
- unsigned int idx = host->resp_idx % RMSG_Q_LEN;
-
- while (1) {
- u32 status = le32_to_cpu(resp[idx].status);
-
- if (status == 0xffffffff) {
- VPRINTK("ending response on index %u\n", idx);
- writel(idx << 3, mmio + CARM_RESP_IDX);
- break;
- }
-
- /* response to a message we sent */
- else if ((status & (1 << 31)) == 0) {
- VPRINTK("handling msg response on index %u\n", idx);
- carm_handle_resp(host, resp[idx].ret_handle, status);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- /* asynchronous events the hardware throws our way */
- else if ((status & 0xff000000) == (1 << 31)) {
- u8 *evt_type_ptr = (u8 *) &resp[idx];
- u8 evt_type = *evt_type_ptr;
- printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
- pci_name(host->pdev), (int) evt_type);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- idx = NEXT_RESP(idx);
- work++;
- }
-
- VPRINTK("EXIT, work==%u\n", work);
- host->resp_idx += work;
-}
-
-static irqreturn_t carm_interrupt(int irq, void *__host)
-{
- struct carm_host *host = __host;
- void __iomem *mmio;
- u32 mask;
- int handled = 0;
- unsigned long flags;
-
- if (!host) {
- VPRINTK("no host\n");
- return IRQ_NONE;
- }
-
- spin_lock_irqsave(&host->lock, flags);
-
- mmio = host->mmio;
-
- /* reading should also clear interrupts */
- mask = readl(mmio + CARM_INT_STAT);
-
- if (mask == 0 || mask == 0xffffffff) {
- VPRINTK("no work, mask == 0x%x\n", mask);
- goto out;
- }
-
- if (mask & INT_ACK_MASK)
- writel(mask, mmio + CARM_INT_STAT);
-
- if (unlikely(host->state == HST_INVALID)) {
- VPRINTK("not initialized yet, mask = 0x%x\n", mask);
- goto out;
- }
-
- if (mask & CARM_HAVE_RESP) {
- handled = 1;
- carm_handle_responses(host);
- }
-
-out:
- spin_unlock_irqrestore(&host->lock, flags);
- VPRINTK("EXIT\n");
- return IRQ_RETVAL(handled);
-}
-
-static void carm_fsm_task (struct work_struct *work)
-{
- struct carm_host *host =
- container_of(work, struct carm_host, fsm_task);
- unsigned long flags;
- unsigned int state;
- int rc, i, next_dev;
- int reschedule = 0;
- int new_state = HST_INVALID;
-
- spin_lock_irqsave(&host->lock, flags);
- state = host->state;
- spin_unlock_irqrestore(&host->lock, flags);
-
- DPRINTK("ENTER, state == %s\n", state_name[state]);
-
- switch (state) {
- case HST_PROBE_START:
- new_state = HST_ALLOC_BUF;
- reschedule = 1;
- break;
-
- case HST_ALLOC_BUF:
- rc = carm_send_special(host, carm_fill_alloc_buf);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_SYNC_TIME:
- rc = carm_send_special(host, carm_fill_sync_time);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_GET_FW_VER:
- rc = carm_send_special(host, carm_fill_get_fw_ver);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_PORT_SCAN:
- rc = carm_send_special(host, carm_fill_scan_channels);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_SCAN_START:
- host->cur_scan_dev = -1;
- new_state = HST_DEV_SCAN;
- reschedule = 1;
- break;
-
- case HST_DEV_SCAN:
- next_dev = -1;
- for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
- if (host->dev_present & (1 << i)) {
- next_dev = i;
- break;
- }
-
- if (next_dev >= 0) {
- host->cur_scan_dev = next_dev;
- rc = carm_array_info(host, next_dev);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- } else {
- new_state = HST_DEV_ACTIVATE;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_ACTIVATE: {
- int activated = 0;
- for (i = 0; i < CARM_MAX_PORTS; i++)
- if (host->dev_active & (1 << i)) {
- struct carm_port *port = &host->port[i];
- struct gendisk *disk = port->disk;
-
- set_capacity(disk, port->capacity);
- host->probe_err = add_disk(disk);
- if (!host->probe_err)
- activated++;
- else
- break;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
- pci_name(host->pdev), activated);
-
- new_state = HST_PROBE_FINISHED;
- reschedule = 1;
- break;
- }
- case HST_PROBE_FINISHED:
- complete(&host->probe_comp);
- break;
- case HST_ERROR:
- /* FIXME: TODO */
- break;
-
- default:
- /* should never occur */
- printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
- assert(0);
- break;
- }
-
- if (new_state != HST_INVALID) {
- spin_lock_irqsave(&host->lock, flags);
- host->state = new_state;
- spin_unlock_irqrestore(&host->lock, flags);
- }
- if (reschedule)
- schedule_work(&host->fsm_task);
-}
-
-static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
-{
- unsigned int i;
-
- for (i = 0; i < 50000; i++) {
- u32 tmp = readl(mmio + CARM_LMUC);
- udelay(100);
-
- if (test_bit) {
- if ((tmp & bits) == bits)
- return 0;
- } else {
- if ((tmp & bits) == 0)
- return 0;
- }
-
- cond_resched();
- }
-
- printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
- bits, test_bit ? "yes" : "no");
- return -EBUSY;
-}
-
-static void carm_init_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- unsigned int i;
- struct carm_response *resp = (struct carm_response *) host->shm;
-
- for (i = 0; i < RMSG_Q_LEN; i++)
- resp[i].status = cpu_to_le32(0xffffffff);
-
- writel(0, mmio + CARM_RESP_IDX);
-}
-
-static int carm_init_host(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- u32 tmp;
- u8 tmp8;
- int rc;
-
- DPRINTK("ENTER\n");
-
- writel(0, mmio + CARM_INT_MASK);
-
- tmp8 = readb(mmio + CARM_INITC);
- if (tmp8 & 0x01) {
- tmp8 &= ~0x01;
- writeb(tmp8, mmio + CARM_INITC);
- readb(mmio + CARM_INITC); /* flush */
-
- DPRINTK("snooze...\n");
- msleep(5000);
- }
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_CME) {
- DPRINTK("CME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 1 failed\n");
- return rc;
- }
- }
- if (tmp & CARM_RME) {
- DPRINTK("RME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_RME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 2 failed\n");
- return rc;
- }
- }
-
- tmp &= ~(CARM_RME | CARM_CME);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 3 failed\n");
- return rc;
- }
-
- carm_init_buckets(mmio);
-
- writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
- writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
- writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);
-
- tmp = readl(mmio + CARM_HMUC);
- tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 4 failed\n");
- return rc;
- }
-
- writel(0, mmio + CARM_HMPHA);
- writel(INT_DEF_MASK, mmio + CARM_INT_MASK);
-
- carm_init_responses(host);
-
- /* start initialization, probing state machine */
- spin_lock_irq(&host->lock);
- assert(host->state == HST_INVALID);
- host->state = HST_PROBE_START;
- spin_unlock_irq(&host->lock);
- schedule_work(&host->fsm_task);
-
- DPRINTK("EXIT\n");
- return 0;
-}
-
-static const struct blk_mq_ops carm_mq_ops = {
- .queue_rq = carm_queue_rq,
-};
-
-static int carm_init_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk;
-
- port->host = host;
- port->port_no = port_no;
-
- disk = blk_mq_alloc_disk(&host->tag_set, port);
- if (IS_ERR(disk))
- return PTR_ERR(disk);
-
- port->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "/%u",
- (unsigned int)host->id * CARM_MAX_PORTS + port_no);
- disk->major = host->major;
- disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
- disk->minors = CARM_MINORS_PER_MAJOR;
- disk->fops = &carm_bd_ops;
- disk->private_data = port;
-
- blk_queue_max_segments(disk->queue, CARM_MAX_REQ_SG);
- blk_queue_segment_boundary(disk->queue, CARM_SG_BOUNDARY);
- return 0;
-}
-
-static void carm_free_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk = port->disk;
-
- if (!disk)
- return;
-
- if (host->state > HST_DEV_ACTIVATE)
- del_gendisk(disk);
- put_disk(disk);
-}
-
-static int carm_init_shm(struct carm_host *host)
-{
- host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
- &host->shm_dma, GFP_KERNEL);
- if (!host->shm)
- return -ENOMEM;
-
- host->msg_base = host->shm + RBUF_LEN;
- host->msg_dma = host->shm_dma + RBUF_LEN;
-
- memset(host->shm, 0xff, RBUF_LEN);
- memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);
-
- return 0;
-}
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct carm_host *host;
- int rc;
- struct request_queue *q;
- unsigned int i;
-
- printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
-
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
-
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host) {
- rc = -ENOMEM;
- goto err_out_regions;
- }
-
- host->pdev = pdev;
- spin_lock_init(&host->lock);
- INIT_WORK(&host->fsm_task, carm_fsm_task);
- init_completion(&host->probe_comp);
-
- host->mmio = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!host->mmio) {
- printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
- pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_kfree;
- }
-
- rc = carm_init_shm(host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
- pci_name(pdev));
- goto err_out_iounmap;
- }
-
- memset(&host->tag_set, 0, sizeof(host->tag_set));
- host->tag_set.ops = &carm_mq_ops;
- host->tag_set.cmd_size = sizeof(struct carm_request);
- host->tag_set.nr_hw_queues = 1;
- host->tag_set.nr_maps = 1;
- host->tag_set.queue_depth = max_queue;
- host->tag_set.numa_node = NUMA_NO_NODE;
- host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-
- rc = blk_mq_alloc_tag_set(&host->tag_set);
- if (rc)
- goto err_out_dma_free;
-
- q = blk_mq_init_queue(&host->tag_set);
- if (IS_ERR(q)) {
- rc = PTR_ERR(q);
- blk_mq_free_tag_set(&host->tag_set);
- goto err_out_dma_free;
- }
-
- host->oob_q = q;
- q->queuedata = host;
-
- /*
- * Figure out which major to use: 160, 161, or dynamic
- */
- if (!test_and_set_bit(0, &carm_major_alloc))
- host->major = 160;
- else if (!test_and_set_bit(1, &carm_major_alloc))
- host->major = 161;
- else
- host->flags |= FL_DYN_MAJOR;
-
- host->id = carm_host_id;
- sprintf(host->name, DRV_NAME "%d", carm_host_id);
-
- rc = register_blkdev(host->major, host->name);
- if (rc < 0)
- goto err_out_free_majors;
- if (host->flags & FL_DYN_MAJOR)
- host->major = rc;
-
- for (i = 0; i < CARM_MAX_PORTS; i++) {
- rc = carm_init_disk(host, i);
- if (rc)
- goto err_out_blkdev_disks;
- }
-
- pci_set_master(pdev);
-
- rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
- pci_name(pdev));
- goto err_out_blkdev_disks;
- }
-
- rc = carm_init_host(host);
- if (rc)
- goto err_out_free_irq;
-
- DPRINTK("waiting for probe_comp\n");
- host->probe_err = -ENODEV;
- wait_for_completion(&host->probe_comp);
- if (host->probe_err) {
- rc = host->probe_err;
- goto err_out_free_irq;
- }
-
- printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
- host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
- (unsigned long long)pci_resource_start(pdev, 0),
- pdev->irq, host->major);
-
- carm_host_id++;
- pci_set_drvdata(pdev, host);
- return 0;
-
-err_out_free_irq:
- free_irq(pdev->irq, host);
-err_out_blkdev_disks:
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
-err_out_free_majors:
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_mq_destroy_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
-err_out_dma_free:
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
-err_out_iounmap:
- iounmap(host->mmio);
-err_out_kfree:
- kfree(host);
-err_out_regions:
- pci_release_regions(pdev);
-err_out:
- pci_disable_device(pdev);
- return rc;
-}
-
-static void carm_remove_one (struct pci_dev *pdev)
-{
- struct carm_host *host = pci_get_drvdata(pdev);
- unsigned int i;
-
- if (!host) {
- printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
- pci_name(pdev));
- return;
- }
-
- free_irq(pdev->irq, host);
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_mq_destroy_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
- iounmap(host->mmio);
- kfree(host);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-module_pci_driver(carm_driver);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 3f1906965ac8..6a4a94b4cdf4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -47,7 +47,12 @@
#define UBLK_MINORS (1U << MINORBITS)
/* All UBLK_F_* have to be included into UBLK_F_ALL */
-#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_URING_CMD_COMP_IN_TASK)
+#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
+ | UBLK_F_URING_CMD_COMP_IN_TASK \
+ | UBLK_F_NEED_GET_DATA)
+
+/* All UBLK_PARAM_TYPE_* should be included here */
+#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
struct ublk_rq_data {
struct callback_head work;
@@ -86,6 +91,15 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_ABORTED 0x04
+/*
+ * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command requires the
+ * data buffer address from ublksrv.
+ *
+ * Bio data can then be copied into this data buffer for a WRITE request
+ * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
+ */
+#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
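+
+/*
+ * Illustrative sequence (assumed from the handling below, not normative):
+ *   1. the driver sees a WRITE without a buffer, sets the flag and
+ *      completes the fetched cmd with UBLK_IO_RES_NEED_GET_DATA;
+ *   2. ublksrv issues UBLK_IO_NEED_GET_DATA, passing a fresh io->addr;
+ *   3. the driver clears the flag, copies the bio data into that buffer
+ *      and completes with UBLK_IO_RES_OK as usual.
+ */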
+
struct ublk_io {
/* userspace buffer address from io cmd */
__u64 addr;
@@ -119,7 +133,6 @@ struct ublk_device {
char *__queues;
unsigned short queue_size;
- unsigned short bs_shift;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@@ -137,6 +150,8 @@ struct ublk_device {
spinlock_t mm_lock;
struct mm_struct *mm;
+ struct ublk_params params;
+
struct completion completion;
unsigned int nr_queues_ready;
atomic_t nr_aborted_queues;
@@ -149,6 +164,12 @@ struct ublk_device {
struct work_struct stop_work;
};
+/* header of ublk_params */
+struct ublk_params_header {
+ __u32 len;
+ __u32 types;
+};
+
static dev_t ublk_chr_devt;
static struct class *ublk_chr_class;
@@ -160,6 +181,90 @@ static DEFINE_MUTEX(ublk_ctl_mutex);
static struct miscdevice ublk_misc;
+static void ublk_dev_param_basic_apply(struct ublk_device *ub)
+{
+ struct request_queue *q = ub->ub_disk->queue;
+ const struct ublk_param_basic *p = &ub->params.basic;
+
+ blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
+ blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
+ blk_queue_io_min(q, 1 << p->io_min_shift);
+ blk_queue_io_opt(q, 1 << p->io_opt_shift);
+
+ blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
+ p->attrs & UBLK_ATTR_FUA);
+ if (p->attrs & UBLK_ATTR_ROTATIONAL)
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+
+ blk_queue_max_hw_sectors(q, p->max_sectors);
+ blk_queue_chunk_sectors(q, p->chunk_sectors);
+ blk_queue_virt_boundary(q, p->virt_boundary_mask);
+
+ if (p->attrs & UBLK_ATTR_READ_ONLY)
+ set_disk_ro(ub->ub_disk, true);
+
+ set_capacity(ub->ub_disk, p->dev_sectors);
+}
+
+static void ublk_dev_param_discard_apply(struct ublk_device *ub)
+{
+ struct request_queue *q = ub->ub_disk->queue;
+ const struct ublk_param_discard *p = &ub->params.discard;
+
+ q->limits.discard_alignment = p->discard_alignment;
+ q->limits.discard_granularity = p->discard_granularity;
+ blk_queue_max_discard_sectors(q, p->max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q,
+ p->max_write_zeroes_sectors);
+ blk_queue_max_discard_segments(q, p->max_discard_segments);
+}
+
+static int ublk_validate_params(const struct ublk_device *ub)
+{
+ /* basic param is the only one which must be set */
+ if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
+ const struct ublk_param_basic *p = &ub->params.basic;
+
+ if (p->logical_bs_shift > PAGE_SHIFT)
+ return -EINVAL;
+
+ if (p->logical_bs_shift > p->physical_bs_shift)
+ return -EINVAL;
+
+ if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
+ const struct ublk_param_discard *p = &ub->params.discard;
+
+ /* So far, only single-segment discard is supported */
+ if (p->max_discard_sectors && p->max_discard_segments != 1)
+ return -EINVAL;
+
+ if (!p->discard_granularity)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ublk_apply_params(struct ublk_device *ub)
+{
+ if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
+ return -EINVAL;
+
+ ublk_dev_param_basic_apply(ub);
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
+ ublk_dev_param_discard_apply(ub);
+
+ return 0;
+}
+
static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
{
if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
@@ -168,6 +273,13 @@ static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
return false;
}
+static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
+{
+ if (ubq->flags & UBLK_F_NEED_GET_DATA)
+ return true;
+ return false;
+}
+
static struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
@@ -443,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}
-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
return ubq->ubq_daemon->flags & PF_EXITING;
}
@@ -493,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
}
/*
- * __ublk_fail_req() may be called from abort context or ->ubq_daemon
- * context during exiting, so lock is required.
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So the lock is unnecessary.
*
* Also aborting may not be started yet, keep in mind that one failed
* request may be issued by block layer again.
@@ -509,6 +622,21 @@ static void __ublk_fail_req(struct ublk_io *io, struct request *req)
}
}
+static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+{
+ /* mark this cmd owned by ublksrv */
+ io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ /*
+ * clear ACTIVE since we are done with this sqe/cmd slot.
+ * An io cmd can only be accepted while the slot is not active.
+ */
+ io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+
+ /* tell ublksrv one io request is coming */
+ io_uring_cmd_done(io->cmd, res, 0);
+}
+
#define UBLK_REQUEUE_DELAY_MS 3
static inline void __ublk_rq_task_work(struct request *req)
@@ -517,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
struct ublk_device *ub = ubq->dev;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- bool task_exiting = current != ubq->ubq_daemon ||
- (current->flags & PF_EXITING);
+ bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
unsigned int mapped_bytes;
pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -531,6 +658,35 @@ static inline void __ublk_rq_task_work(struct request *req)
return;
}
+ if (ublk_need_get_data(ubq) &&
+ (req_op(req) == REQ_OP_WRITE ||
+ req_op(req) == REQ_OP_FLUSH)) {
+ /*
+ * We have not handled UBLK_IO_NEED_GET_DATA command yet,
+ * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * and notify it.
+ */
+ if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
+ __func__, io->cmd->cmd_op, ubq->q_id,
+ req->tag, io->flags);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+ return;
+ }
+ /*
+ * We have handled UBLK_IO_NEED_GET_DATA command,
+ * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+ * do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+ __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+ }
+
mapped_bytes = ublk_map_io(ubq, req, io);
/* partially mapped, update io descriptor */
@@ -553,17 +709,7 @@ static inline void __ublk_rq_task_work(struct request *req)
mapped_bytes >> 9;
}
- /* mark this cmd owned by ublksrv */
- io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
-
- /*
- * clear ACTIVE since we are done with this sqe/cmd slot
- * We can only accept io cmd in case of being not active.
- */
- io->flags &= ~UBLK_IO_FLAG_ACTIVE;
-
- /* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_OK, 0);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
}
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
@@ -610,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
goto fail;
} else {
- struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ /*
+ * If the check pass, we know that this is a re-issued request aborted
+ * previously in monitor_work because the ubq_daemon(cmd's task) is
+ * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+ * because this ioucmd's io_uring context may be freed now if no inflight
+ * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+ *
+ * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
+ * the tag). Then the request is re-started(allocating the tag) and we are here.
+ * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+ * guarantees that here is a re-issued request aborted previously.
+ */
+ if (io->flags & UBLK_IO_FLAG_ABORTED)
+ goto fail;
+
pdu->req = rq;
io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
}
@@ -788,16 +950,27 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
UBLK_DAEMON_MONITOR_PERIOD);
}
+static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+{
+ return ubq->nr_io_ready == ubq->q_depth;
+}
+
static void ublk_cancel_queue(struct ublk_queue *ubq)
{
int i;
+ if (!ublk_queue_ready(ubq))
+ return;
+
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
}
+
+ /* all io commands are canceled */
+ ubq->nr_io_ready = 0;
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
@@ -818,19 +991,14 @@ static void ublk_stop_dev(struct ublk_device *ub)
del_gendisk(ub->ub_disk);
ub->dev_info.state = UBLK_S_DEV_DEAD;
ub->dev_info.ublksrv_pid = -1;
- ublk_cancel_dev(ub);
put_disk(ub->ub_disk);
ub->ub_disk = NULL;
unlock:
+ ublk_cancel_dev(ub);
mutex_unlock(&ub->mutex);
cancel_delayed_work_sync(&ub->monitor_work);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
-{
- return ubq->nr_io_ready == ubq->q_depth;
-}
-
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -846,6 +1014,25 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
mutex_unlock(&ub->mutex);
}
+static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
+ int tag, struct io_uring_cmd *cmd)
+{
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+
+ if (ublk_can_use_task_work(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ /* should not fail since we only call it from the ubq_daemon context */
+ task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
+ } else {
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->req = req;
+ io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+ }
+}
+
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
@@ -884,6 +1071,14 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
goto out;
}
+ /*
+ * ensure that the user issues UBLK_IO_NEED_GET_DATA
+ * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
+ */
+ if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
+ ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
+ goto out;
+
switch (cmd_op) {
case UBLK_IO_FETCH_REQ:
/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
@@ -917,6 +1112,14 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
io->cmd = cmd;
ublk_commit_completion(ub, ub_cmd);
break;
+ case UBLK_IO_NEED_GET_DATA:
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ goto out;
+ io->addr = ub_cmd->addr;
+ io->cmd = cmd;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd);
+ break;
default:
goto out;
}
@@ -1083,13 +1286,13 @@ static void ublk_stop_work_fn(struct work_struct *work)
ublk_stop_dev(ub);
}
-/* align maximum I/O size to PAGE_SIZE */
+/* align max io buffer size to PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
- unsigned int max_rq_bytes = ub->dev_info.rq_max_blocks << ub->bs_shift;
+ unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
- ub->dev_info.rq_max_blocks =
- round_down(max_rq_bytes, PAGE_SIZE) >> ub->bs_shift;
+ ub->dev_info.max_io_buf_bytes =
+ round_down(max_io_bytes, PAGE_SIZE);
}
static int ublk_add_tag_set(struct ublk_device *ub)
@@ -1132,7 +1335,6 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
int ublksrv_pid = (int)header->data[0];
- unsigned long dev_blocks = header->data[1];
struct ublk_device *ub;
struct gendisk *disk;
int ret = -EINVAL;
@@ -1155,10 +1357,6 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
goto out_unlock;
}
- /* We may get disk size updated */
- if (dev_blocks)
- ub->dev_info.dev_blocks = dev_blocks;
-
disk = blk_mq_alloc_disk(&ub->tag_set, ub);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
@@ -1168,27 +1366,28 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
disk->fops = &ub_fops;
disk->private_data = ub;
- blk_queue_logical_block_size(disk->queue, ub->dev_info.block_size);
- blk_queue_physical_block_size(disk->queue, ub->dev_info.block_size);
- blk_queue_io_min(disk->queue, ub->dev_info.block_size);
- blk_queue_max_hw_sectors(disk->queue,
- ub->dev_info.rq_max_blocks << (ub->bs_shift - 9));
- disk->queue->limits.discard_granularity = PAGE_SIZE;
- blk_queue_max_discard_sectors(disk->queue, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(disk->queue, UINT_MAX >> 9);
-
- set_capacity(disk, ub->dev_info.dev_blocks << (ub->bs_shift - 9));
-
ub->dev_info.ublksrv_pid = ublksrv_pid;
ub->ub_disk = disk;
+
+ ret = ublk_apply_params(ub);
+ if (ret)
+ goto out_put_disk;
+
get_device(&ub->cdev_dev);
ret = add_disk(disk);
if (ret) {
- put_disk(disk);
- goto out_unlock;
+ /*
+ * Has to drop the reference since ->free_disk won't be
+ * called in case of add_disk failure.
+ */
+ ublk_put_device(ub);
+ goto out_put_disk;
}
set_bit(UB_STATE_USED, &ub->state);
ub->dev_info.state = UBLK_S_DEV_LIVE;
+out_put_disk:
+ if (ret)
+ put_disk(disk);
out_unlock:
mutex_unlock(&ub->mutex);
ublk_put_device(ub);
@@ -1250,9 +1449,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
{
pr_devel("%s: dev id %d flags %llx\n", __func__,
info->dev_id, info->flags);
- pr_devel("\t nr_hw_queues %d queue_depth %d block size %d dev_capacity %lld\n",
- info->nr_hw_queues, info->queue_depth,
- info->block_size, info->dev_blocks);
+ pr_devel("\t nr_hw_queues %d queue_depth %d\n",
+ info->nr_hw_queues, info->queue_depth);
}
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
@@ -1312,7 +1510,6 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
/* We are not ready to support zero copy */
ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
- ub->bs_shift = ilog2(ub->dev_info.block_size);
ub->dev_info.nr_hw_queues = min_t(unsigned int,
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
@@ -1436,6 +1633,82 @@ static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
return ret;
}
+static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_params_header ph;
+ struct ublk_device *ub;
+ int ret;
+
+ if (header->len <= sizeof(ph) || !header->addr)
+ return -EINVAL;
+
+ if (copy_from_user(&ph, argp, sizeof(ph)))
+ return -EFAULT;
+
+ if (ph.len > header->len || !ph.len)
+ return -EINVAL;
+
+ if (ph.len > sizeof(struct ublk_params))
+ ph.len = sizeof(struct ublk_params);
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ mutex_lock(&ub->mutex);
+ if (copy_to_user(argp, &ub->params, ph.len))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ mutex_unlock(&ub->mutex);
+
+ ublk_put_device(ub);
+ return ret;
+}
+
+static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_params_header ph;
+ struct ublk_device *ub;
+ int ret = -EFAULT;
+
+ if (header->len <= sizeof(ph) || !header->addr)
+ return -EINVAL;
+
+ if (copy_from_user(&ph, argp, sizeof(ph)))
+ return -EFAULT;
+
+ if (ph.len > header->len || !ph.len || !ph.types)
+ return -EINVAL;
+
+ if (ph.len > sizeof(struct ublk_params))
+ ph.len = sizeof(struct ublk_params);
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ /* parameters can only be changed when device isn't live */
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ ret = -EACCES;
+ } else if (copy_from_user(&ub->params, argp, ph.len)) {
+ ret = -EFAULT;
+ } else {
+ /* clear all we don't support yet */
+ ub->params.types &= UBLK_PARAM_TYPE_ALL;
+ ret = ublk_validate_params(ub);
+ }
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+
+ return ret;
+}
+
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -1471,6 +1744,12 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_CMD_GET_QUEUE_AFFINITY:
ret = ublk_ctrl_get_queue_affinity(cmd);
break;
+ case UBLK_CMD_GET_PARAMS:
+ ret = ublk_ctrl_get_params(cmd);
+ break;
+ case UBLK_CMD_SET_PARAMS:
+ ret = ublk_ctrl_set_params(cmd);
+ break;
default:
break;
}
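
The SET_PARAMS path above copies a caller-supplied struct ublk_params whose
leading header (len/types) bounds the copy and selects the parameter groups.
A minimal userspace sketch of filling values that satisfy
ublk_validate_params(), assuming the UAPI definitions from
<linux/ublk_cmd.h> introduced by this series (all concrete values are
illustrative):

	#include <limits.h>
	#include <string.h>
	#include <linux/ublk_cmd.h>

	static void fill_params(struct ublk_params *p, __u64 dev_sectors,
				__u32 max_io_buf_bytes)
	{
		memset(p, 0, sizeof(*p));
		p->len = sizeof(*p);		/* bytes the driver may copy */
		p->types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD;

		p->basic.logical_bs_shift = 9;		/* must be <= PAGE_SHIFT */
		p->basic.physical_bs_shift = 12;	/* >= logical shift */
		p->basic.io_min_shift = 9;
		p->basic.io_opt_shift = 12;
		/* capped by the negotiated per-io buffer size */
		p->basic.max_sectors = max_io_buf_bytes >> 9;
		p->basic.dev_sectors = dev_sectors;

		p->discard.discard_granularity = 4096;	/* non-zero required */
		p->discard.max_discard_sectors = UINT_MAX >> 9;
		p->discard.max_discard_segments = 1;	/* only 1 supported */
	}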
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d7d72e8f6e55..30255fcaf181 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -101,6 +101,14 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
+static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+
+ return vq;
+}
+
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist hdr, status, *sgs[3];
@@ -416,7 +424,7 @@ static void virtio_queue_rqs(struct request **rqlist)
struct request *requeue_list = NULL;
rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
bool kick;
if (!virtblk_prep_rq_batch(req)) {
@@ -837,7 +845,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
- struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
@@ -862,22 +870,10 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return found;
}
-static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
-{
- struct virtio_blk *vblk = data;
- struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
-
- WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
- hctx->driver_data = vq;
- return 0;
-}
-
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
- .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
.poll = virtblk_poll,
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 97de13b14175..ee7ad2fb432d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -157,6 +157,11 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
return 0;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");
+
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
@@ -472,12 +477,6 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -520,8 +519,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (bdev_max_secure_erase_sectors(bdev))
vbd->discard_secure = true;
- vbd->feature_gnt_persistent = feature_persistent;
-
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -1087,10 +1084,9 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- if (blkif->vbd.feature_gnt_persistent)
- blkif->vbd.feature_gnt_persistent =
- xenbus_read_unsigned(dev->otherend,
- "feature-persistent", 0);
+
+ blkif->vbd.feature_gnt_persistent = feature_persistent &&
+ xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index dc48298225a6..8e56e69fb4c4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1988,8 +1988,6 @@ static int blkfront_probe(struct xenbus_device *dev,
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
- info->feature_persistent = feature_persistent;
-
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2283,7 +2281,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (info->feature_persistent)
+ if (feature_persistent)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 052aa3f65514..0916de952e09 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -63,12 +63,6 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
bool zcomp_available_algorithm(const char *comp)
{
- int i;
-
- i = sysfs_match_string(backends, comp);
- if (i >= 0)
- return true;
-
/*
* Crypto does not ignore a trailing new line symbol,
* so make sure you don't supply a string containing
@@ -217,6 +211,11 @@ struct zcomp *zcomp_create(const char *compress)
struct zcomp *comp;
int error;
+ /*
+ * Crypto API will execute /sbin/modprobe if the compression module
+ * is not loaded yet. We must do this check here, otherwise we would
+ * end up calling /sbin/modprobe under the CPU hot-plug lock.
+ */
if (!zcomp_available_algorithm(compress))
return ERR_PTR(-EINVAL);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4abeb261b833..226ea76cc819 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,7 +52,9 @@ static unsigned int num_devices = 1;
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
+#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops;
+#endif
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -1144,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int version = 2;
+ int version = 1;
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu\n",
+ "version: %d\n%8llu %8llu\n",
version,
+ (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1349,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret = 0;
unsigned long alloced_pages;
- unsigned long handle = 0;
+ unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
@@ -1367,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
kunmap_atomic(mem);
+compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1375,21 +1379,40 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
+ zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
-
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
-
- if (unlikely(!handle)) {
+ /*
+ * handle allocation has 2 paths:
+ * a) fast path is executed with preemption disabled (for
+ * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+ * since we can't sleep;
+ * b) slow path enables preemption and attempts to allocate
+ * the page with __GFP_DIRECT_RECLAIM bit set. we have to
+ * put per-cpu compression stream and, thus, to re-do
+ * the compression once handle is allocated.
+ *
+ * if we have a valid (non-error) handle here then we are coming
+ * from the slow path and the handle has already been allocated.
+ */
+ if (IS_ERR((void *)handle))
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (IS_ERR((void *)handle)) {
zcomp_stream_put(zram->comp);
- return -ENOMEM;
+ atomic64_inc(&zram->stats.writestall);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (!IS_ERR((void *)handle))
+ goto compress_again;
+ return PTR_ERR((void *)handle);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
@@ -1946,6 +1969,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
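
The rewritten write path above restores a two-stage handle allocation: a
non-sleeping zs_malloc() attempt while the per-CPU compression stream is
held, then a sleeping GFP_NOIO retry after the stream is dropped, which in
turn forces the page to be recompressed. A condensed sketch of that control
flow, with compress()/alloc_fast()/alloc_slow()/put_stream()/
store_and_release() as hypothetical stand-ins for the calls above:

	static int zram_store(struct page *page)
	{
		unsigned long handle = -ENOMEM;	/* error-encoded, as above */
		unsigned int comp_len;

	compress_again:
		compress(page, &comp_len);		/* stream held, atomic */
		if (IS_ERR((void *)handle))
			handle = alloc_fast(comp_len);	/* no direct reclaim */
		if (IS_ERR((void *)handle)) {
			put_stream();			/* may sleep from here */
			handle = alloc_slow(comp_len);	/* GFP_NOIO */
			if (!IS_ERR((void *)handle))
				goto compress_again;	/* redo compression */
			return PTR_ERR((void *)handle);
		}
		store_and_release(page, handle, comp_len);
		return 0;
	}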
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 158c91e54850..80c3b43b4828 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -81,6 +81,7 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fe7e2105e766..bf6716ff863b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -20,7 +20,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
-#include <linux/intel-iommu.h>
+#include <linux/iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
@@ -573,18 +573,15 @@ static void intel_gtt_cleanup(void)
*/
static inline int needs_ilk_vtd_wa(void)
{
-#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
+ /*
+ * Query iommu subsystem to see if we need the workaround. Presumably
+ * that was loaded first.
*/
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
+ return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ device_iommu_mapped(&intel_private.pcidev->dev));
}
static bool intel_gtt_can_wc(void)
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 8da1d7917bdc..429e956f34e1 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -23,7 +23,7 @@ static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
buf = (unsigned long *)data;
for (i = 0; i < len; i++)
- powernv_get_random_long(buf++);
+ pnv_get_random_long(buf++);
return len * sizeof(unsigned long);
}
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 488808dc17a2..795853dfc46b 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -252,5 +252,5 @@ static void __exit trng_exit(void)
trng_debug_exit();
}
-module_cpu_feature_match(MSA, trng_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init);
module_exit(trng_exit);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 84ca98ed1dad..32a932a065a6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 4a5516406c22..927088b2c3d3 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,6 +74,18 @@ config TCG_TIS_SPI_CR50
If you have a H1 secure module running Cr50 firmware on SPI bus,
say Yes and it will be accessible from within Linux.
+config TCG_TIS_I2C
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (I2C - generic)"
+ depends on I2C
+ select CRC_CCITT
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip, compliant with the TCG TPM PTP
+ (I2C interface) specification and connected to an I2C bus master,
+ say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_i2c.
+
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
depends on ARCH_SYNQUACER || COMPILE_TEST
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 66d39ea6bd10..0222b1ddb310 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -29,6 +29,7 @@ tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+obj-$(CONFIG_TCG_TIS_I2C) += tpm_tis_i2c.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2163c6ee0d36..24ee4e1cc452 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -55,6 +55,7 @@ enum tpm_addr {
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_FAILEDSELFTEST 0x1C
#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_TAG_RQU_COMMAND 193
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index f7dc986fa4a0..cf64c7385105 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -709,7 +709,12 @@ int tpm1_auto_startup(struct tpm_chip *chip)
if (rc)
goto out;
rc = tpm1_do_selftest(chip);
- if (rc) {
+ if (rc == TPM_ERR_FAILEDSELFTEST) {
+ dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n");
+ /* A TPM in this state possibly allows or needs a firmware upgrade */
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ return 0;
+ } else if (rc) {
dev_err(&chip->dev, "TPM self test failed\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c1eb5d223839..65d03867e114 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -752,6 +752,12 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) {
+ dev_info(&chip->dev,
+ "TPM in field failure mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
out:
/*
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index dc56b976d816..757623bacfd5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -289,6 +289,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0;
int status;
u32 expected;
+ int rc;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -328,6 +329,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
+ rc = tpm_tis_verify_crc(priv, (size_t)size, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for response.\n");
+ size = rc;
+ goto out;
+ }
+
out:
tpm_tis_ready(chip);
return size;
@@ -443,6 +451,12 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc < 0)
return rc;
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ return rc;
+ }
+
/* go and do it */
rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
if (rc < 0)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6c203f36b8a1..66a5a13cd1df 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -121,6 +121,8 @@ struct tpm_tis_phy_ops {
u8 *result, enum tpm_tis_io_mode mode);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
const u8 *value, enum tpm_tis_io_mode mode);
+ int (*verify_crc)(struct tpm_tis_data *data, size_t len,
+ const u8 *value);
};
static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
@@ -188,6 +190,14 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return rc;
}
+static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ if (!data->phy_ops->verify_crc)
+ return 0;
+ return data->phy_ops->verify_crc(data, len, value);
+}
+
static inline bool is_bsw(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
new file mode 100644
index 000000000000..ba0911b1d1ff
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2021 Nuvoton Technology corporation
+ * Copyright (C) 2019-2022 Infineon Technologies AG
+ *
+ * This device driver implements the TPM interface as defined in the TCG PC
+ * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04
+ * Revision 14.
+ *
+ * It is based on the tpm_tis_spi device driver.
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc-ccitt.h>
+#include "tpm_tis_core.h"
+
+/* TPM registers */
+#define TPM_I2C_LOC_SEL 0x00
+#define TPM_I2C_ACCESS 0x04
+#define TPM_I2C_INTERFACE_CAPABILITY 0x30
+#define TPM_I2C_DEVICE_ADDRESS 0x38
+#define TPM_I2C_DATA_CSUM_ENABLE 0x40
+#define TPM_DATA_CSUM 0x44
+#define TPM_I2C_DID_VID 0x48
+#define TPM_I2C_RID 0x4C
+
+/* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */
+#define TPM_LOC_SEL 0x0FFF
+
+/* Mask to extract the I2C register from TIS register addresses */
+#define TPM_TIS_REGISTER_MASK 0x0FFF
+
+/* Default Guard Time of 250µs until interface capability register is read */
+#define GUARD_TIME_DEFAULT_MIN 250
+#define GUARD_TIME_DEFAULT_MAX 300
+
+/* Guard Time of 250µs after I2C slave NACK */
+#define GUARD_TIME_ERR_MIN 250
+#define GUARD_TIME_ERR_MAX 300
+
+/* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
+#define TPM_GUARD_TIME_SR_MASK 0x40000000
+#define TPM_GUARD_TIME_RR_MASK 0x00100000
+#define TPM_GUARD_TIME_RW_MASK 0x00080000
+#define TPM_GUARD_TIME_WR_MASK 0x00040000
+#define TPM_GUARD_TIME_WW_MASK 0x00020000
+#define TPM_GUARD_TIME_MIN_MASK 0x0001FE00
+#define TPM_GUARD_TIME_MIN_SHIFT 9
+
+/* Masks with bits that must be read zero */
+#define TPM_ACCESS_READ_ZERO 0x48
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF6
+#define TPM_STS_READ_ZERO 0x23
+#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+
+struct tpm_tis_i2c_phy {
+ struct tpm_tis_data priv;
+ struct i2c_client *i2c_client;
+ bool guard_time_read;
+ bool guard_time_write;
+ u16 guard_time_min;
+ u16 guard_time_max;
+ u8 *io_buf;
+};
+
+static inline struct tpm_tis_i2c_phy *
+to_tpm_tis_i2c_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_i2c_phy, priv);
+}
+
+/*
+ * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation
+ * of Register Space for FIFO TPM Access" of the TCG PC Client PTP
+ * Specification. In order for this code to work together with tpm_tis_core,
+ * those addresses need to be mapped to the registers defined for I2C TPMs in
+ * Table 51 "I2C-TPM Register Overview".
+ *
+ * For most addresses this can be done by simply stripping off the locality
+ * information from the address. A few addresses need to be mapped explicitly,
+ * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is
+ * only defined for I2C TPMs and is also mapped explicitly here to distinguish
+ * it from TPM_ACCESS(0).
+ *
+ * Locality information is ignored, since this driver assumes exclusive access
+ * to the TPM and always uses locality 0.
+ */
+static u8 tpm_tis_i2c_address_to_register(u32 addr)
+{
+ addr &= TPM_TIS_REGISTER_MASK;
+
+ switch (addr) {
+ case TPM_ACCESS(0):
+ return TPM_I2C_ACCESS;
+ case TPM_LOC_SEL:
+ return TPM_I2C_LOC_SEL;
+ case TPM_DID_VID(0):
+ return TPM_I2C_DID_VID;
+ case TPM_RID(0):
+ return TPM_I2C_RID;
+ default:
+ return addr;
+ }
+}
+
+static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data,
+ struct i2c_msg *msg)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ bool guard_time;
+ int i = 0;
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ guard_time = phy->guard_time_read;
+ else
+ guard_time = phy->guard_time_write;
+
+ do {
+ ret = i2c_transfer(phy->i2c_client->adapter, msg, 1);
+ if (ret < 0)
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ else if (guard_time)
+ usleep_range(phy->guard_time_min, phy->guard_time_max);
+ /* retry on TPM NACK */
+ } while (ret < 0 && i++ < TPM_RETRY);
+
+ return ret;
+}
+
+/* Check that bits which must be read zero are not set */
+static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf)
+{
+ u32 zero_mask;
+ u32 value;
+
+ switch (len) {
+ case sizeof(u8):
+ value = buf[0];
+ break;
+ case sizeof(u16):
+ value = le16_to_cpup((__le16 *)buf);
+ break;
+ case sizeof(u32):
+ value = le32_to_cpup((__le32 *)buf);
+ break;
+ default:
+ /* unknown length, skip check */
+ return 0;
+ }
+
+ switch (reg) {
+ case TPM_I2C_ACCESS:
+ zero_mask = TPM_ACCESS_READ_ZERO;
+ break;
+ case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INT_ENABLE_ZERO;
+ break;
+ case TPM_STS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_STS_READ_ZERO;
+ break;
+ case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INTF_CAPABILITY_ZERO;
+ break;
+ case TPM_I2C_INTERFACE_CAPABILITY:
+ zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO;
+ break;
+ default:
+ /* unknown register, skip check */
+ return 0;
+ }
+
+ if (unlikely((value & zero_mask) != 0x00)) {
+ pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value);
+ return -EIO;
+ }
+
+ return 0;
+}
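+
+/*
+ * Example (illustrative): a one-byte read of TPM_I2C_ACCESS returning 0x80
+ * passes (0x80 & TPM_ACCESS_READ_ZERO == 0), while 0xC8 fails the check,
+ * making the caller retry the transfer.
+ */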
+
+static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int i;
+ int ret;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ /* write register */
+ msg.len = sizeof(reg);
+ msg.buf = &reg;
+ msg.flags = 0;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ /* read data */
+ msg.buf = result;
+ msg.len = len;
+ msg.flags = I2C_M_RD;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ if (ret == 0)
+ return 0;
+
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ }
+
+ return ret;
+}
+
+static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int ret;
+
+ if (len > TPM_BUFSIZE - 1)
+ return -EIO;
+
+ /* write register and data in one go */
+ phy->io_buf[0] = reg;
+ memcpy(phy->io_buf + sizeof(reg), value, len);
+
+ msg.len = sizeof(reg) + len;
+ msg.buf = phy->io_buf;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ u16 crc_tpm, crc_host;
+ int rc;
+
+ rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm);
+ if (rc < 0)
+ return rc;
+
+ /* reflect crc result, regardless of host endianness */
+ crc_host = swab16(crc_ccitt(0, value, len));
+ if (crc_tpm != crc_host)
+ return -EIO;
+
+ return 0;
+}
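For reference, a hedged sketch of the host-side half of this check; the payload bytes are made up, and crc_ccitt() is the kernel's CRC-16/CCITT helper from <linux/crc-ccitt.h>:

#include <linux/crc-ccitt.h>
#include <linux/swab.h>

const u8 payload[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0a }; /* hypothetical TPM frame */
/* byte-swapped so it compares directly against the value read from TPM_DATA_CSUM */
u16 crc_host = swab16(crc_ccitt(0, payload, sizeof(payload)));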
+
+/*
+ * Guard Time:
+ * After each I2C operation, the TPM might require the master to wait.
+ * The time period is vendor-specific and must be read from the
+ * TPM_I2C_INTERFACE_CAPABILITY register.
+ *
+ * Before the Guard Time is read (or after the TPM failed to send an I2C ACK),
+ * a Guard Time of 250µs applies.
+ *
+ * Various flags in the same register indicate if a guard time is needed:
+ * - SR: <I2C read with repeated start> <guard time> <I2C read>
+ * - RR: <I2C read> <guard time> <I2C read>
+ * - RW: <I2C read> <guard time> <I2C write>
+ * - WR: <I2C write> <guard time> <I2C read>
+ * - WW: <I2C write> <guard time> <I2C write>
+ *
+ * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME
+ */
+static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy)
+{
+ u32 i2c_caps;
+ int ret;
+
+ phy->guard_time_read = true;
+ phy->guard_time_write = true;
+ phy->guard_time_min = GUARD_TIME_DEFAULT_MIN;
+ phy->guard_time_max = GUARD_TIME_DEFAULT_MAX;
+
+ ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY,
+ sizeof(i2c_caps), (u8 *)&i2c_caps,
+ TPM_TIS_PHYS_32);
+ if (ret)
+ return ret;
+
+ phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_RW_MASK);
+ phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_WW_MASK);
+ phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
+ TPM_GUARD_TIME_MIN_SHIFT;
+ /* guard_time_max = guard_time_min * 1.2 */
+ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5;
+
+ return 0;
+}
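A hedged walk-through of the resulting policy: suppose the capability register reports a 40 µs minimum guard time that is required only after writes (WR or WW set, RR and RW clear; the mask and shift constants are defined near the top of this file):

phy->guard_time_read  = false; /* neither RR nor RW bit set */
phy->guard_time_write = true;  /* WR or WW bit set */
phy->guard_time_min   = 40;    /* GUARD_TIME field, in microseconds */
phy->guard_time_max   = 48;    /* 40 + 40 / 5, i.e. min * 1.2 */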
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
+ .read_bytes = tpm_tis_i2c_read_bytes,
+ .write_bytes = tpm_tis_i2c_write_bytes,
+ .verify_crc = tpm_tis_i2c_verify_crc,
+};
+
+static int tpm_tis_i2c_probe(struct i2c_client *dev,
+ const struct i2c_device_id *id)
+{
+ struct tpm_tis_i2c_phy *phy;
+ const u8 crc_enable = 1;
+ const u8 locality = 0;
+ int ret;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL);
+ if (!phy->io_buf)
+ return -ENOMEM;
+
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+ ret = tpm_tis_i2c_init_guard_time(phy);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality),
+ &locality, TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE,
+ sizeof(crc_enable), &crc_enable,
+ TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops,
+ NULL);
+}
+
+static int tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+ return 0;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_id[] = {
+ { "tpm_tis_i2c", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tis_i2c_match[] = {
+ { .compatible = "infineon,slb9673", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
+#endif
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .driver = {
+ .name = "tpm_tis_i2c",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_i2c_match),
+ },
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .id_table = tpm_tis_i2c_id,
+};
+module_i2c_driver(tpm_tis_i2c_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native I2C access");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index f9d5b7334341..4fb4fd4b06bd 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -4,42 +4,101 @@
#include <linux/export.h>
#include <linux/gfp.h>
+struct devm_clk_state {
+ struct clk *clk;
+ void (*exit)(struct clk *clk);
+};
+
static void devm_clk_release(struct device *dev, void *res)
{
- clk_put(*(struct clk **)res);
+ struct devm_clk_state *state = res;
+
+ if (state->exit)
+ state->exit(state->clk);
+
+ clk_put(state->clk);
}
-struct clk *devm_clk_get(struct device *dev, const char *id)
+static struct clk *__devm_clk_get(struct device *dev, const char *id,
+ struct clk *(*get)(struct device *dev, const char *id),
+ int (*init)(struct clk *clk),
+ void (*exit)(struct clk *clk))
{
- struct clk **ptr, *clk;
+ struct devm_clk_state *state;
+ struct clk *clk;
+ int ret;
- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
- clk = clk_get(dev, id);
- if (!IS_ERR(clk)) {
- *ptr = clk;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
+ clk = get(dev, id);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
}
+ if (init) {
+ ret = init(clk);
+ if (ret)
+ goto err_clk_init;
+ }
+
+ state->clk = clk;
+ state->exit = exit;
+
+ devres_add(dev, state);
+
return clk;
+
+err_clk_init:
+
+ clk_put(clk);
+err_clk_get:
+
+ devres_free(state);
+ return ERR_PTR(ret);
+}
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
-struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
- struct clk *clk = devm_clk_get(dev, id);
+ return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
- if (clk == ERR_PTR(-ENOENT))
- return NULL;
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
- return clk;
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
+
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
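Typical consumer code then collapses to a single call, as in the axg-audio conversion later in this series; the enable is undone and the reference dropped automatically on driver detach:

struct clk *pclk;

pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
if (IS_ERR(pclk))
	return PTR_ERR(pclk);
/* no explicit clk_disable_unprepare()/clk_put() needed on any exit path */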
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 54942d758ee6..f734e34735a9 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -78,7 +78,8 @@ static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *
static struct clk_hw *
__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
- const char *name, const char *parent_name, int index,
+ const char *name, const char *parent_name,
+ const struct clk_hw *parent_hw, int index,
unsigned long flags, unsigned int mult, unsigned int div,
bool devm)
{
@@ -110,6 +111,8 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
init.flags = flags;
if (parent_name)
init.parent_names = &parent_name;
+ else if (parent_hw)
+ init.parent_hws = &parent_hw;
else
init.parent_data = &pdata;
init.num_parents = 1;
@@ -148,16 +151,48 @@ struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
const char *name, unsigned int index, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, index,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, index,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_index);
+/**
+ * devm_clk_hw_register_fixed_factor_parent_hw - Register a fixed factor clock with
+ * pointer to parent clock
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: fixed factor flags
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Return: Pointer to fixed factor clk_hw structure that was registered or
+ * an error pointer.
+ */
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, parent_hw,
+ -1, flags, mult, div, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_parent_hw);
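A hedged usage sketch (the clock name and the parent_hw variable are hypothetical): a driver that already holds the parent's clk_hw can register a divide-by-two child without round-tripping through a global string name:

struct clk_hw *div2;

div2 = devm_clk_hw_register_fixed_factor_parent_hw(&pdev->dev, "pll_div2",
						   parent_hw, 0, 1, 2);
if (IS_ERR(div2))
	return PTR_ERR(div2);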
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL,
+ parent_hw, -1, flags, mult, div,
+ false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_parent_hw);
+
struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
@@ -204,22 +239,16 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor);
#ifdef CONFIG_OF
-static const struct of_device_id set_rate_parent_matches[] = {
- { .compatible = "allwinner,sun4i-a10-pll3-2x-clk" },
- { /* Sentinel */ },
-};
-
static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
struct clk_hw *hw;
const char *clk_name = node->name;
- unsigned long flags = 0;
u32 div, mult;
int ret;
@@ -237,11 +266,8 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
- if (of_match_node(set_rate_parent_matches, node))
- flags |= CLK_SET_RATE_PARENT;
-
- hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, 0,
- flags, mult, div, false);
+ hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, NULL, 0,
+ 0, mult, div, false);
if (IS_ERR(hw)) {
/*
* Clear OF_POPULATED flag so that clock registration can be
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f00d4c1158d7..7fc191c15507 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4279,54 +4279,6 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
-static int devm_clk_match(struct device *dev, void *res, void *data)
-{
- struct clk *c = res;
- if (WARN_ON(!c))
- return 0;
- return c == data;
-}
-
-static int devm_clk_hw_match(struct device *dev, void *res, void *data)
-{
- struct clk_hw *hw = res;
-
- if (WARN_ON(!hw))
- return 0;
- return hw == data;
-}
-
-/**
- * devm_clk_unregister - resource managed clk_unregister()
- * @dev: device that is unregistering the clock data
- * @clk: clock to unregister
- *
- * Deallocate a clock allocated with devm_clk_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_unregister(struct device *dev, struct clk *clk)
-{
- WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
-}
-EXPORT_SYMBOL_GPL(devm_clk_unregister);
-
-/**
- * devm_clk_hw_unregister - resource managed clk_hw_unregister()
- * @dev: device that is unregistering the hardware-specific clock data
- * @hw: link to hardware-specific clock data
- *
- * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
-{
- WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
- hw));
-}
-EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
-
static void devm_clk_release(struct device *dev, void *res)
{
clk_put(*(struct clk **)res);
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 71c102d950ab..a2aaa14fc1ae 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -64,10 +64,13 @@ struct clk_fracn_gppll {
* Fout = Fvco / (rdiv * odiv)
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
- PLL_FRACN_GP(650000000U, 81, 0, 0, 0, 3),
- PLL_FRACN_GP(594000000U, 198, 0, 0, 0, 8),
- PLL_FRACN_GP(560000000U, 70, 0, 0, 0, 3),
- PLL_FRACN_GP(400000000U, 50, 0, 0, 0, 3),
+ PLL_FRACN_GP(650000000U, 81, 0, 1, 0, 3),
+ PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
+ PLL_FRACN_GP(560000000U, 70, 0, 1, 0, 3),
+ PLL_FRACN_GP(498000000U, 83, 0, 1, 0, 4),
+ PLL_FRACN_GP(484000000U, 121, 0, 1, 0, 6),
+ PLL_FRACN_GP(445333333U, 167, 0, 1, 0, 9),
+ PLL_FRACN_GP(400000000U, 50, 0, 1, 0, 3),
PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
};
@@ -131,18 +134,7 @@ static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned lon
mfi = FIELD_GET(PLL_MFI_MASK, pll_div);
rdiv = FIELD_GET(PLL_RDIV_MASK, pll_div);
- rdiv = rdiv + 1;
odiv = FIELD_GET(PLL_ODIV_MASK, pll_div);
- switch (odiv) {
- case 0:
- odiv = 2;
- break;
- case 1:
- odiv = 3;
- break;
- default:
- break;
- }
/*
* Sometimes, the recalculated rate has deviation due to
@@ -160,6 +152,20 @@ static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned lon
if (rate)
return (unsigned long)rate;
+ if (!rdiv)
+ rdiv = rdiv + 1;
+
+ switch (odiv) {
+ case 0:
+ odiv = 2;
+ break;
+ case 1:
+ odiv = 3;
+ break;
+ default:
+ break;
+ }
+
/* Fvco = Fref * (MFI + MFN / MFD) */
fvco = fvco * mfi * mfd + fvco * mfn;
do_div(fvco, mfd * rdiv * odiv);
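Worked example against the 393216000U table entry, assuming the customary 24 MHz reference on i.MX93 (an assumption; the actual parent is board-defined):

/* PLL_FRACN_GP(393216000U, mfi=81, mfn=92, mfd=100, rdiv=0, odiv=5) */
u64 fvco = 24000000ULL * (81 * 100 + 92); /* Fref * (MFI * MFD + MFN) */
do_div(fvco, 100 * 1 * 5);                /* MFD * rdiv (0 -> 1) * odiv */
/* fvco == 393216000, matching the table's nominal rate exactly */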
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index edcc87661d1f..f5c9fa40491c 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -150,7 +150,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_A55_GATE, "a55", "a55_root", 0x8000, },
/* M33 critical clk for system run */
{ IMX93_CLK_CM33_GATE, "cm33", "m33_root", 0x8040, CLK_IS_CRITICAL },
- { IMX93_CLK_ADC1_GATE, "adc1", "osc_24m", 0x82c0, },
+ { IMX93_CLK_ADC1_GATE, "adc1", "adc_root", 0x82c0, },
{ IMX93_CLK_WDOG1_GATE, "wdog1", "osc_24m", 0x8300, },
{ IMX93_CLK_WDOG2_GATE, "wdog2", "osc_24m", 0x8340, },
{ IMX93_CLK_WDOG3_GATE, "wdog3", "osc_24m", 0x8380, },
@@ -160,7 +160,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
{ IMX93_CLK_MU_A_GATE, "mu_a", "bus_aon_root", 0x84c0, },
{ IMX93_CLK_MU_B_GATE, "mu_b", "bus_aon_root", 0x8500, },
- { IMX93_CLK_EDMA1_GATE, "edma1", "wakeup_axi_root", 0x8540, },
+ { IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi", "flexspi_root", 0x8640, },
{ IMX93_CLK_GPIO1_GATE, "gpio1", "m33_root", 0x8880, },
@@ -219,7 +219,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
{ IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
- { IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_apb_root", 0x9700, },
+ { IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
{ IMX93_CLK_USB_TEST_60M_GATE, "usb_test_60m", "hsio_usb_test_60m_root", 0x9a40, },
{ IMX93_CLK_HSIO_TROUT_24M_GATE, "hsio_trout_24m", "osc_24m", 0x9a80, },
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index 47c2289f3d1d..edf1e2ed2b59 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -36,6 +36,14 @@ static const struct mtk_gate eth_clks[] = {
GATE_ETH(CLK_ETHSYS_CRYPTO, "crypto_clk", "ethif_sel", 29),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static const struct of_device_id of_match_clk_mt2701_eth[] = {
{ .compatible = "mediatek,mt2701-ethsys", },
{}
@@ -58,7 +66,7 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 79929ed37f83..1458109d99d9 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -35,6 +35,14 @@ static const struct mtk_gate g3d_clks[] = {
GATE_G3D(CLK_G3DSYS_CORE, "g3d_core", "mfg_sel", 0),
};
+static u16 rst_ofs[] = { 0xc, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -52,7 +60,7 @@ static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0xc);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index 1aa36cb93ad0..434cbbe8c037 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -33,6 +33,14 @@ static const struct mtk_gate hif_clks[] = {
GATE_HIF(CLK_HIFSYS_PCIE2, "pcie2_clk", "ethpll_500m_ck", 26),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static const struct of_device_id of_match_clk_mt2701_hif[] = {
{ .compatible = "mediatek,mt2701-hifsys", },
{}
@@ -57,7 +65,7 @@ static int clk_mt2701_hif_probe(struct platform_device *pdev)
return r;
}
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 04ba356db2d7..9b442af37e67 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -735,6 +735,24 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static struct clk_hw_onecell_data *infra_clk_data;
static void __init mtk_infrasys_init_early(struct device_node *node)
@@ -787,7 +805,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return 0;
}
@@ -910,7 +928,7 @@ static int mtk_pericfg_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 2, 0x0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 410b059727ea..56980dd6c2ea 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1258,6 +1258,24 @@ static const struct mtk_pll_data plls[] = {
0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infra */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* peri */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -1361,7 +1379,7 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return r;
}
@@ -1383,7 +1401,7 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index b12d48705496..43de0477d5d9 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -65,6 +65,14 @@ static const struct mtk_gate sgmii_clks[] = {
"ssusb_cdr_fb", 5),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7622_ethsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -82,7 +90,7 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 58728e35e80a..67e96231dd25 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -76,6 +76,14 @@ static const struct mtk_gate pcie_clks[] = {
GATE_PCIE(CLK_SATA_PM_EN, "sata_pm_en", "univpll2_d4", 30),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -93,7 +101,7 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
@@ -115,7 +123,7 @@ static int clk_mt7622_pciesys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index e4a5e5230861..3b55f8641fae 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -610,6 +610,24 @@ static struct mtk_composite peri_muxes[] = {
MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
};
+static u16 infrasys_rst_ofs[] = { 0x30, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static int mtk_topckgen_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -663,7 +681,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 1, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return 0;
}
@@ -714,7 +732,7 @@ static int mtk_pericfg_init(struct platform_device *pdev)
clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
- mtk_register_reset_controller(node, 2, 0x0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index c49fd732c9b2..282dd6559465 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -76,6 +76,14 @@ static const struct mtk_gate sgmii_clks[2][4] = {
}
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7629_ethsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -92,7 +100,7 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index acaa97fda331..0c8b9e139789 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -71,6 +71,14 @@ static const struct mtk_gate pcie_clks[] = {
GATE_PCIE(CLK_PCIE_P0_PIPE_EN, "pcie_p0_pipe_en", "pcie0_pipe_en", 23),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -88,7 +96,7 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
@@ -110,7 +118,7 @@ static int clk_mt7629_pciesys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 9ef524b44862..b68888a034c4 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -514,6 +514,24 @@ static const struct mtk_composite peri_clks[] __initconst = {
MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ }
+};
+
static void __init mtk_topckgen_init(struct device_node *node)
{
struct clk_hw_onecell_data *clk_data;
@@ -559,7 +577,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller(node, &clk_rst_desc[0]);
}
CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init);
@@ -587,7 +605,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller(node, &clk_rst_desc[1]);
}
CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init);
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 0929db330852..b8529ee7199d 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -819,6 +819,24 @@ static const struct mtk_gate venclt_clks[] __initconst = {
GATE_VENCLT(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ }
+};
+
static struct clk_hw_onecell_data *mt8173_top_clk_data __initdata;
static struct clk_hw_onecell_data *mt8173_pll_clk_data __initdata;
@@ -882,7 +900,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller(node, &clk_rst_desc[0]);
}
CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init);
@@ -910,7 +928,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller(node, &clk_rst_desc[1]);
}
CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index b5c17988c337..8512101e1189 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -18,9 +18,6 @@
#include <dt-bindings/clock/mt8183-clk.h>
-/* Infra global controller reset set register */
-#define INFRA_RST0_SET_OFFSET 0x120
-
static DEFINE_SPINLOCK(mt8183_clk_lock);
static const struct mtk_fixed_clk top_fixed_clks[] = {
@@ -1153,6 +1150,19 @@ static const struct mtk_pll_data plls[] = {
0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
};
+static u16 infra_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_rst_ofs),
+};
+
static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -1240,7 +1250,7 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev)
return r;
}
- mtk_register_reset_controller_set_clr(node, 4, INFRA_RST0_SET_OFFSET);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
index 2a7adc25abaa..df2a6bd1aefa 100644
--- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt8186-clk.h>
+#include <dt-bindings/reset/mt8186-resets.h>
#include "clk-gate.h"
#include "clk-mtk.h"
@@ -191,9 +192,31 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO3(CLK_INFRA_AO_FLASHIF_66M, "infra_ao_flashif_66m", "top_axi", 29),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8186_INFRA_THERMAL_CTRL_RST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8186_INFRA_PTP_CTRL_RST] = 1 * RST_NR_PER_BANK + 0,
+};
+
+static struct mtk_clk_rst_desc infra_ao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
static const struct mtk_clk_desc infra_ao_desc = {
.clks = infra_ao_clks,
.num_clks = ARRAY_SIZE(infra_ao_clks),
+ .rst_desc = &infra_ao_rst_desc,
};
static const struct of_device_id of_match_clk_mt8186_infra_ao[] = {
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index 87c3b79b79cf..635f7a0b629a 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -12,28 +12,15 @@
#include <dt-bindings/clock/mt8192-clk.h>
-static const struct mtk_gate_regs msdc_cg_regs = {
- .set_ofs = 0xb4,
- .clr_ofs = 0xb4,
- .sta_ofs = 0xb4,
-};
-
static const struct mtk_gate_regs msdc_top_cg_regs = {
.set_ofs = 0x0,
.clr_ofs = 0x0,
.sta_ofs = 0x0,
};
-#define GATE_MSDC(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &msdc_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-
#define GATE_MSDC_TOP(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &msdc_top_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-static const struct mtk_gate msdc_clks[] = {
- GATE_MSDC(CLK_MSDC_AXI_WRAP, "msdc_axi_wrap", "axi_sel", 22),
-};
-
static const struct mtk_gate msdc_top_clks[] = {
GATE_MSDC_TOP(CLK_MSDC_TOP_AES_0P, "msdc_top_aes_0p", "aes_msdcfde_sel", 0),
GATE_MSDC_TOP(CLK_MSDC_TOP_SRC_0P, "msdc_top_src_0p", "infra_msdc0_src", 1),
@@ -52,11 +39,6 @@ static const struct mtk_gate msdc_top_clks[] = {
GATE_MSDC_TOP(CLK_MSDC_TOP_AHB2AXI_BRG_AXI, "msdc_top_ahb2axi_brg_axi", "axi_sel", 14),
};
-static const struct mtk_clk_desc msdc_desc = {
- .clks = msdc_clks,
- .num_clks = ARRAY_SIZE(msdc_clks),
-};
-
static const struct mtk_clk_desc msdc_top_desc = {
.clks = msdc_top_clks,
.num_clks = ARRAY_SIZE(msdc_top_clks),
@@ -64,9 +46,6 @@ static const struct mtk_clk_desc msdc_top_desc = {
static const struct of_device_id of_match_clk_mt8192_msdc[] = {
{
- .compatible = "mediatek,mt8192-msdc",
- .data = &msdc_desc,
- }, {
.compatible = "mediatek,mt8192-msdc_top",
.data = &msdc_top_desc,
}, {
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index dda211b7a745..ebbd2798d9a3 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -18,6 +18,7 @@
#include "clk-pll.h"
#include <dt-bindings/clock/mt8192-clk.h>
+#include <dt-bindings/reset/mt8192-resets.h>
static DEFINE_SPINLOCK(mt8192_clk_lock);
@@ -1114,6 +1115,30 @@ static const struct mtk_gate top_clks[] = {
GATE_TOP(CLK_TOP_SSUSB_PHY_REF, "ssusb_phy_ref", "clk26m", 25),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8192_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8192_INFRA_RST2_PEXTP_PHY_SWRST] = 2 * RST_NR_PER_BANK + 15,
+ [MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
+ [MT8192_INFRA_RST4_PCIE_TOP_SWRST] = 4 * RST_NR_PER_BANK + 1,
+ [MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 12,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
#define MT8192_PLL_FMAX (3800UL * MHZ)
#define MT8192_PLL_FMIN (1500UL * MHZ)
#define MT8192_INTEGER_BITS 8
@@ -1240,6 +1265,10 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
if (r)
goto free_clk_data;
+ r = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (r)
+ goto free_clk_data;
+
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index 8ebe3b9415c4..97657f255618 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -7,6 +7,7 @@
#include "clk-mtk.h"
#include <dt-bindings/clock/mt8195-clk.h>
+#include <dt-bindings/reset/mt8195-resets.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -182,9 +183,32 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO4(CLK_INFRA_AO_PERI_UFS_MEM_SUB, "infra_ao_peri_ufs_mem_sub", "mem_466m", 31),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8195_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
+ [MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 10,
+};
+
+static struct mtk_clk_rst_desc infra_ao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
static const struct mtk_clk_desc infra_ao_desc = {
.clks = infra_ao_clks,
.num_clks = ARRAY_SIZE(infra_ao_clks),
+ .rst_desc = &infra_ao_rst_desc,
};
static const struct of_device_id of_match_clk_mt8195_infra_ao[] = {
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index b9188000ab3c..05a188c62119 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -444,6 +444,13 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, clk_data);
+ if (mcd->rst_desc) {
+ r = mtk_register_reset_controller_with_dev(&pdev->dev,
+ mcd->rst_desc);
+ if (r)
+ goto unregister_clks;
+ }
+
return r;
unregister_clks:
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index adb1304d35d4..1b95c484d5aa 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -13,6 +13,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "reset.h"
+
#define MAX_MUX_GATE_BIT 31
#define INVALID_MUX_GATE_BIT (MAX_MUX_GATE_BIT + 1)
@@ -187,15 +189,10 @@ void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data);
struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg);
-void mtk_register_reset_controller(struct device_node *np,
- unsigned int num_regs, int regofs);
-
-void mtk_register_reset_controller_set_clr(struct device_node *np,
- unsigned int num_regs, int regofs);
-
struct mtk_clk_desc {
const struct mtk_gate *clks;
size_t num_clks;
+ const struct mtk_clk_rst_desc *rst_desc;
};
int mtk_clk_simple_probe(struct platform_device *pdev);
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index bcec4b89f449..179505549a7c 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -8,55 +8,39 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <linux/slab.h>
-#include "clk-mtk.h"
+#include "reset.h"
-struct mtk_reset {
- struct regmap *regmap;
- int regofs;
- struct reset_controller_dev rcdev;
-};
-
-static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+static inline struct mtk_clk_rst_data *to_mtk_clk_rst_data(struct reset_controller_dev *rcdev)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
- unsigned int reg = data->regofs + ((id / 32) << 4);
-
- return regmap_write(data->regmap, reg, 1);
+ return container_of(rcdev, struct mtk_clk_rst_data, rcdev);
}
-static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int mtk_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool deassert)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
- unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4;
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+ unsigned int val = deassert ? 0 : ~0;
- return regmap_write(data->regmap, reg, 1);
+ return regmap_update_bits(data->regmap,
+ data->desc->rst_bank_ofs[id / RST_NR_PER_BANK],
+ BIT(id % RST_NR_PER_BANK), val);
}
static int mtk_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
-
- return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
- BIT(id % 32), ~0);
+ return mtk_reset_update(rcdev, id, false);
}
static int mtk_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
-
- return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
- BIT(id % 32), 0);
+ return mtk_reset_update(rcdev, id, true);
}
-static int mtk_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int mtk_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
int ret;
@@ -67,8 +51,32 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
return mtk_reset_deassert(rcdev, id);
}
+static int mtk_reset_update_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id, bool deassert)
+{
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+ unsigned int deassert_ofs = deassert ? 0x4 : 0;
+
+ return regmap_write(data->regmap,
+ data->desc->rst_bank_ofs[id / RST_NR_PER_BANK] +
+ deassert_ofs,
+ BIT(id % RST_NR_PER_BANK));
+}
+
+static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return mtk_reset_update_set_clr(rcdev, id, false);
+}
+
+static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return mtk_reset_update_set_clr(rcdev, id, true);
+}
+
static int mtk_reset_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
int ret;
@@ -90,51 +98,135 @@ static const struct reset_control_ops mtk_reset_ops_set_clr = {
.reset = mtk_reset_set_clr,
};
-static void mtk_register_reset_controller_common(struct device_node *np,
- unsigned int num_regs, int regofs,
- const struct reset_control_ops *reset_ops)
+static int reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+
+ if (reset_spec->args[0] >= rcdev->nr_resets ||
+ reset_spec->args[0] >= data->desc->rst_idx_map_nr)
+ return -EINVAL;
+
+ return data->desc->rst_idx_map[reset_spec->args[0]];
+}
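Worked example using the mt8192 map added later in this series: a device tree consumer referencing MT8192_INFRA_RST2_PEXTP_PHY_SWRST is translated to the flat id 2 * 32 + 15 = 79, which the set/clr ops then decode back into a bank offset and a bit (desc stands for the registered mtk_clk_rst_desc):

unsigned long id = 2 * RST_NR_PER_BANK + 15;        /* 79, via rst_idx_map */
u16 ofs = desc->rst_bank_ofs[id / RST_NR_PER_BANK]; /* bank 2 */
u32 bit = BIT(id % RST_NR_PER_BANK);                /* BIT(15) */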
+
+int mtk_register_reset_controller(struct device_node *np,
+ const struct mtk_clk_rst_desc *desc)
{
- struct mtk_reset *data;
- int ret;
struct regmap *regmap;
+ const struct reset_control_ops *rcops = NULL;
+ struct mtk_clk_rst_data *data;
+ int ret;
+
+ if (!desc) {
+ pr_err("mtk clock reset desc is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (desc->version) {
+ case MTK_RST_SIMPLE:
+ rcops = &mtk_reset_ops;
+ break;
+ case MTK_RST_SET_CLR:
+ rcops = &mtk_reset_ops_set_clr;
+ break;
+ default:
+ pr_err("Unknown reset version %d\n", desc->version);
+ return -EINVAL;
+ }
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap)) {
pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap);
- return;
+ return -EINVAL;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return;
+ return -ENOMEM;
+ data->desc = desc;
data->regmap = regmap;
- data->regofs = regofs;
data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = num_regs * 32;
- data->rcdev.ops = reset_ops;
+ data->rcdev.ops = rcops;
data->rcdev.of_node = np;
+ if (data->desc->rst_idx_map_nr > 0) {
+ data->rcdev.of_reset_n_cells = 1;
+ data->rcdev.nr_resets = desc->rst_idx_map_nr;
+ data->rcdev.of_xlate = reset_xlate;
+ } else {
+ data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
+ }
+
ret = reset_controller_register(&data->rcdev);
if (ret) {
pr_err("could not register reset controller: %d\n", ret);
kfree(data);
- return;
+ return ret;
}
-}
-void mtk_register_reset_controller(struct device_node *np,
- unsigned int num_regs, int regofs)
-{
- mtk_register_reset_controller_common(np, num_regs, regofs,
- &mtk_reset_ops);
+ return 0;
}
-void mtk_register_reset_controller_set_clr(struct device_node *np,
- unsigned int num_regs, int regofs)
+int mtk_register_reset_controller_with_dev(struct device *dev,
+ const struct mtk_clk_rst_desc *desc)
{
- mtk_register_reset_controller_common(np, num_regs, regofs,
- &mtk_reset_ops_set_clr);
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ const struct reset_control_ops *rcops = NULL;
+ struct mtk_clk_rst_data *data;
+ int ret;
+
+ if (!desc) {
+ dev_err(dev, "mtk clock reset desc is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (desc->version) {
+ case MTK_RST_SIMPLE:
+ rcops = &mtk_reset_ops;
+ break;
+ case MTK_RST_SET_CLR:
+ rcops = &mtk_reset_ops_set_clr;
+ break;
+ default:
+ dev_err(dev, "Unknown reset version %d\n", desc->version);
+ return -EINVAL;
+ }
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Cannot find regmap %pe\n", regmap);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->desc = desc;
+ data->regmap = regmap;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.ops = rcops;
+ data->rcdev.of_node = np;
+ data->rcdev.dev = dev;
+
+ if (data->desc->rst_idx_map_nr > 0) {
+ data->rcdev.of_reset_n_cells = 1;
+ data->rcdev.nr_resets = desc->rst_idx_map_nr;
+ data->rcdev.of_xlate = reset_xlate;
+ } else {
+ data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
+ }
+
+ ret = devm_reset_controller_register(dev, &data->rcdev);
+ if (ret) {
+ dev_err(dev, "could not register reset controller: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/reset.h b/drivers/clk/mediatek/reset.h
new file mode 100644
index 000000000000..6a58a3d59165
--- /dev/null
+++ b/drivers/clk/mediatek/reset.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __DRV_CLK_MTK_RESET_H
+#define __DRV_CLK_MTK_RESET_H
+
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+#define RST_NR_PER_BANK 32
+
+/* Infra global controller reset set register */
+#define INFRA_RST0_SET_OFFSET 0x120
+#define INFRA_RST1_SET_OFFSET 0x130
+#define INFRA_RST2_SET_OFFSET 0x140
+#define INFRA_RST3_SET_OFFSET 0x150
+#define INFRA_RST4_SET_OFFSET 0x730
+
+/**
+ * enum mtk_reset_version - Version of MediaTek clock reset controller.
+ * @MTK_RST_SIMPLE: Use the same registers for bit set and clear.
+ * @MTK_RST_SET_CLR: Use separate registers for bit set and clear.
+ * @MTK_RST_MAX: Total number of versions of the MediaTek clock reset controller.
+ */
+enum mtk_reset_version {
+ MTK_RST_SIMPLE = 0,
+ MTK_RST_SET_CLR,
+ MTK_RST_MAX,
+};
+
+/**
+ * struct mtk_clk_rst_desc - Description of MediaTek clock reset.
+ * @version: Reset version which is defined in enum mtk_reset_version.
+ * @rst_bank_ofs: Pointer to an array containing base offsets of the reset registers.
+ * @rst_bank_nr: Number of reset banks.
+ * @rst_idx_map: Pointer to an array containing the reset ids, used when the
+ * input argument is an index rather than a raw id; unnecessary otherwise.
+ * @rst_idx_map_nr: Number of entries in the reset index map.
+ */
+struct mtk_clk_rst_desc {
+ enum mtk_reset_version version;
+ u16 *rst_bank_ofs;
+ u32 rst_bank_nr;
+ u16 *rst_idx_map;
+ u32 rst_idx_map_nr;
+};
+
+/**
+ * struct mtk_clk_rst_data - Data of MediaTek clock reset controller.
+ * @regmap: Pointer to the regmap covering the reset registers.
+ * @rcdev: Reset controller device.
+ * @desc: Pointer to description of the reset controller.
+ */
+struct mtk_clk_rst_data {
+ struct regmap *regmap;
+ struct reset_controller_dev rcdev;
+ const struct mtk_clk_rst_desc *desc;
+};
+
+/**
+ * mtk_register_reset_controller - Register MediaTek clock reset controller
+ * @np: Pointer to device node.
+ * @desc: Constant pointer to description of clock reset.
+ *
+ * Return: 0 on success, a negative errno otherwise.
+ */
+int mtk_register_reset_controller(struct device_node *np,
+ const struct mtk_clk_rst_desc *desc);
+
+/**
+ * mtk_register_reset_controller_with_dev - Register MediaTek clock reset controller with device
+ * @dev: Pointer to device.
+ * @desc: Constant pointer to description of clock reset.
+ *
+ * Return: 0 on success, a negative errno otherwise.
+ */
+int mtk_register_reset_controller_with_dev(struct device *dev,
+ const struct mtk_clk_rst_desc *desc);
+
+#endif /* __DRV_CLK_MTK_RESET_H */
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index bfe36bd41339..5016682e47c8 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -1657,35 +1657,6 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
&sm1_sysclk_b_en,
};
-static int devm_clk_get_enable(struct device *dev, char *id)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get(dev, id);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err_probe(dev, ret, "failed to get %s", id);
- return ret;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable %s", id);
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
- if (ret) {
- dev_err(dev, "failed to add reset action on %s", id);
- return ret;
- }
-
- return 0;
-}
-
struct axg_audio_reset_data {
struct reset_controller_dev rstc;
struct regmap *map;
@@ -1787,6 +1758,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
struct regmap *map;
void __iomem *regs;
struct clk_hw *hw;
+ struct clk *clk;
int ret, i;
data = of_device_get_match_data(dev);
@@ -1804,9 +1776,9 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Get the mandatory peripheral clock */
- ret = devm_clk_get_enable(dev, "pclk");
- if (ret)
- return ret;
+ clk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
ret = device_reset(dev);
if (ret) {
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index bc4dcf356d82..1cf1ef70e347 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -166,6 +166,7 @@ config IPQ_LCC_806X
config IPQ_GCC_8074
tristate "IPQ8074 Global Clock Controller"
+ select QCOM_GDSC
help
Support for global clock controller on ipq8074 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -608,6 +609,13 @@ config SM_CAMCC_8250
Support for the camera clock controller on SM8250 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_8450
+ tristate "SM8450 Camera Clock Controller"
+ select SM_GCC_8450
+ help
+ Support for the camera clock controller on SM8450 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_DISPCC_6125
tristate "SM6125 Display Clock Controller"
depends on SM_GCC_6125
@@ -618,11 +626,11 @@ config SM_DISPCC_6125
splash screen
config SM_DISPCC_8250
- tristate "SM8150 and SM8250 Display Clock Controller"
- depends on SM_GCC_8150 || SM_GCC_8250
+ tristate "SM8150/SM8250/SM8350 Display Clock Controller"
+ depends on SM_GCC_8150 || SM_GCC_8250 || SM_GCC_8350
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8150 and SM8250 devices.
+ SM8150/SM8250/SM8350 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
@@ -712,6 +720,14 @@ config SM_GPUCC_8250
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_8350
+ tristate "SM8350 Graphics Clock Controller"
+ select SM_GCC_8350
+ help
+ Support for the graphics clock controller on SM8350 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 36789f5233ef..fbcf04073f07 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -11,6 +11,7 @@ clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-y += clk-regmap-phy-mux.o
clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
+obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
@@ -101,6 +103,7 @@ obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
+obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index be3f95326965..27d44188a7ab 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1534,6 +1534,8 @@ static struct clk_branch cam_cc_sys_tmr_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.pd = {
@@ -1567,6 +1569,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1576,6 +1579,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 439eaafdcc86..9b32c56a5bc5 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -2205,6 +2205,8 @@ static struct clk_branch cam_cc_sleep_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x7004,
.pd = {
@@ -2238,6 +2240,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2247,6 +2250,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2440,17 +2444,7 @@ static struct platform_driver cam_cc_sm8250_driver = {
},
};
-static int __init cam_cc_sm8250_init(void)
-{
- return platform_driver_register(&cam_cc_sm8250_driver);
-}
-subsys_initcall(cam_cc_sm8250_init);
-
-static void __exit cam_cc_sm8250_exit(void)
-{
- platform_driver_unregister(&cam_cc_sm8250_driver);
-}
-module_exit(cam_cc_sm8250_exit);
+module_platform_driver(cam_cc_sm8250_driver);
MODULE_DESCRIPTION("QTI CAMCC SM8250 Driver");
MODULE_LICENSE("GPL v2");
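
The removed init/exit boilerplate and its replacement are equivalent except for initcall level: module_platform_driver() expands to module_init()/module_exit() wrappers, so when this driver is built in it now registers at device_initcall time instead of subsys_initcall. For reference, the helper (from include/linux/platform_device.h):

	#define module_platform_driver(__platform_driver) \
		module_driver(__platform_driver, platform_driver_register, \
				platform_driver_unregister)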
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
new file mode 100644
index 000000000000..e3c09471dadf
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -0,0 +1,2856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8450-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK
+};
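
These DT_* values index the controller's "clocks" property in the device tree, in binding order; DT_IFACE is the bus interface clock and never appears as a clock parent below, while the others are selected through .index in clk_parent_data entries. Roughly, the clk core resolves such a parent positionally, as in this sketch (dev is hypothetical):

	/* An .index parent names a phandle by position in "clocks", e.g.: */
	struct clk *xo = of_clk_get(dev->of_node, DT_BI_TCXO);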
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL7_OUT_EVEN,
+ P_CAM_CC_PLL8_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct pll_vco rivian_evo_vco[] = {
+ { 864000000, 1056000000, 0 },
+};
+
+static const struct clk_parent_data pll_parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
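
Worked numbers for the PLL0 block above, assuming the 16-bit alpha width used by the Lucid-family PLLs: f = 19.2 MHz x (l + alpha / 2^16). The two post-dividers share the PLL's register block (same .offset) and differ only in bit field:

	/* cam_cc_pll0          : 19.2 MHz * (0x3e + 0x8000/0x10000)
	 *                      = 19.2 MHz * 62.5 = 1200 MHz
	 * cam_cc_pll0_out_even : 1200 MHz / 2    =  600 MHz
	 * cam_cc_pll0_out_odd  : 1200 MHz / 3    =  400 MHz
	 * matching the 600/400/300/200 MHz entries in the frequency
	 * tables further down. */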
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x25,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x90008820,
+ .config_ctl_hi_val = 0x00890263,
+ .config_ctl_hi1_val = 0x00000217,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = rivian_evo_vco,
+ .num_vco = ARRAY_SIZE(rivian_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll7_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll7 = {
+ .offset = 0x7000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = {
+ .offset = 0x7000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll7_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll7_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll8_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll8 = {
+ .offset = 0x8000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
+ .offset = 0x8000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll8_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll8_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL8_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll8_out_even.clkr.hw },
+};
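
Each parent_map entry pairs a logical source (P_*) with the mux value programmed into the RCG's CFG register, and the matching clk_parent_data array must list the parents in the same order. The lookup the common code performs is essentially this (a sketch modelled on qcom_find_src_index() in drivers/clk/qcom/common.c):

	static u8 src_to_cfg(const struct parent_map *map, int num, u8 src)
	{
		int i;

		for (i = 0; i < num; i++)
			if (map[i].src == src)
				return map[i].cfg;

		return 0; /* not reached for sources named in a freq table */
	}

Note the cfg values need not be dense; map 0 above jumps from 3 to 5.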
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL7_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll7_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9_ao[] = {
+ { .index = DT_BI_TCXO_AO, .name = "bi_tcxo_ao" },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
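
F(f, s, h, m, n) records a target rate, its source (resolved through the parent_map), a pre-divider and an optional M/N pair. As defined across the qcom clock drivers, the pre-divider is stored doubled-minus-one so half-integer values encode exactly:

	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

	/* e.g. h = 2.5 in the cphy_rx table below is stored as 4 and the
	 * hardware divider is recovered as (4 + 1) / 2 = 2.5. */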
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x10050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x13194,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x1312c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x13148,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x1104c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x150e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x15104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x15124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x1514c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ .cmd_rcgr = 0x1516c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
+ .cmd_rcgr = 0x1518c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csid_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csid_clk_src = {
+ .cmd_rcgr = 0x13174,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x13108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
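
Every entry in the per-IFE tables keeps the RCG divider at 1; the distinct rates come from re-locking the dedicated PLL itself. The RCG and the even post-divider both carry CLK_SET_RATE_PARENT, so a rate request walks up to cam_cc_pll3, whose Lucid EVO ops reprogram L and alpha. The boot-time configuration corresponds to the lowest entry:

	/* cam_cc_pll3: l = 0x2d -> 19.2 MHz * 45 = 864 MHz
	 * even tap /2 -> 432 MHz, the first table entry; 594, 675 and
	 * 727 MHz are reached by re-locking the PLL at runtime. */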
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x11018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0x12018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0x12064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0x13000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_nps_clk_src[] = {
+ F(364000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(500000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_nps_clk_src = {
+ .cmd_rcgr = 0x1008c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ipe_nps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0x130dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0),
+ { }
+};
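
The sensor MCLKs are clocked from the Rivian EVO PLL and use the 8-bit M/N counters (mnd_width = 8) for the fractional 24 MHz step. Checking against cam_cc_pll2 (l = 0x32, so 19.2 MHz x 50 = 960 MHz, inside the Rivian VCO range above):

	/* F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4): 960 / 10 * 1/4 = 24 MHz
	 * F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0): 960 / 14 ~= 68.571 MHz */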
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x15000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x1501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x15038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x15054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x15070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x1508c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x150a8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x150c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_qdss_debug_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_qdss_debug_clk_src = {
+ .cmd_rcgr = 0x131bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_qdss_debug_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_0_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_0_clk_src = {
+ .cmd_rcgr = 0x13064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_sfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_1_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_1_clk_src = {
+ .cmd_rcgr = 0x130ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_sfe_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x13210,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x10034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x131f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_9_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_gdsc_clk = {
+ .halt_reg = 0x1320c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1320c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_gdsc_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
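
cam_cc_gdsc_clk is flagged CLK_IS_CRITICAL, so the framework enables it at registration and never gates it. The branch clocks that follow all use the same CBCR layout, with halt_reg and enable_reg pointing at one register. A simplified sketch of what clk_branch2_ops does on enable (modelled on drivers/clk/qcom/clk-branch.c):

	static int branch2_enable(struct regmap *map, u32 cbcr)
	{
		u32 val;
		int ret;

		ret = regmap_update_bits(map, cbcr, BIT(0), BIT(0));
		if (ret)
			return ret;

		/* BRANCH_HALT: poll the CLK_OFF status bit until it clears.
		 * BRANCH_HALT_DELAY (cam_cc_core_ahb_clk below) skips the
		 * poll and just waits a fixed delay instead. */
		return regmap_read_poll_timeout(map, cbcr, val,
						!(val & BIT(31)), 1, 200);
	}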
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_fast_ahb_clk = {
+ .halt_reg = 0x10030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0x131ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0x131b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x13144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x13160,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x13164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_bps_clk = {
+ .halt_reg = 0x10070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_bps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
+ .halt_reg = 0x1316c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1316c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_0_clk = {
+ .halt_reg = 0x11038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_1_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_2_clk = {
+ .halt_reg = 0x12084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_lite_clk = {
+ .halt_reg = 0x13020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ipe_nps_clk = {
+ .halt_reg = 0x100ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ipe_nps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sbi_clk = {
+ .halt_reg = 0x100ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sbi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_0_clk = {
+ .halt_reg = 0x13084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_1_clk = {
+ .halt_reg = 0x130cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x150f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x1511c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1511c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x1513c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1513c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x15164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi4phytimer_clk = {
+ .halt_reg = 0x15184,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15184,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi5phytimer_clk = {
+ .halt_reg = 0x151a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi5phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_clk = {
+ .halt_reg = 0x1318c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1318c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
+ .halt_reg = 0x15100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_csiphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x150fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x15120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x15140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x15168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy4_clk = {
+ .halt_reg = 0x15188,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15188,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy4_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy5_clk = {
+ .halt_reg = 0x151a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy5_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0x13128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x13120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x11030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x1103c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1103c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
+ .halt_reg = 0x11048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0x12030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
+ .halt_reg = 0x12048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0x1207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0x12088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_fast_ahb_clk = {
+ .halt_reg = 0x12094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_ahb_clk = {
+ .halt_reg = 0x13048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0x13018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0x13044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0x1303c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1303c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_ahb_clk = {
+ .halt_reg = 0x100c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_clk = {
+ .halt_reg = 0x100a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = {
+ .halt_reg = 0x100c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_clk = {
+ .halt_reg = 0x100b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = {
+ .halt_reg = 0x100c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0x130f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x15018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x15034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x15050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1506c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x15088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x150a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x150c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x150dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_clk = {
+ .halt_reg = 0x131d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_qdss_debug_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_xo_clk = {
+ .halt_reg = 0x131d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_xo_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_ahb_clk = {
+ .halt_reg = 0x100f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_clk = {
+ .halt_reg = 0x100e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_clk = {
+ .halt_reg = 0x1307c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1307c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
+ .halt_reg = 0x13090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_clk = {
+ .halt_reg = 0x130c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = {
+ .halt_reg = 0x130d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sleep_clk = {
+ .halt_reg = 0x13228,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13228,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *cam_cc_sm8450_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_BPS_FAST_AHB_CLK] = &cam_cc_bps_fast_ahb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPAS_BPS_CLK] = &cam_cc_cpas_bps_clk.clkr,
+ [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr,
+ [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr,
+ [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr,
+ [CAM_CC_CPAS_IFE_2_CLK] = &cam_cc_cpas_ife_2_clk.clkr,
+ [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr,
+ [CAM_CC_CPAS_IPE_NPS_CLK] = &cam_cc_cpas_ipe_nps_clk.clkr,
+ [CAM_CC_CPAS_SBI_CLK] = &cam_cc_cpas_sbi_clk.clkr,
+ [CAM_CC_CPAS_SFE_0_CLK] = &cam_cc_cpas_sfe_0_clk.clkr,
+ [CAM_CC_CPAS_SFE_1_CLK] = &cam_cc_cpas_sfe_1_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK] = &cam_cc_csi5phytimer_clk.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK_SRC] = &cam_cc_csi5phytimer_clk_src.clkr,
+ [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr,
+ [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr,
+ [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
+ [CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_2_FAST_AHB_CLK] = &cam_cc_ife_2_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_NPS_AHB_CLK] = &cam_cc_ipe_nps_ahb_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK] = &cam_cc_ipe_nps_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK_SRC] = &cam_cc_ipe_nps_clk_src.clkr,
+ [CAM_CC_IPE_NPS_FAST_AHB_CLK] = &cam_cc_ipe_nps_fast_ahb_clk.clkr,
+ [CAM_CC_IPE_PPS_CLK] = &cam_cc_ipe_pps_clk.clkr,
+ [CAM_CC_IPE_PPS_FAST_AHB_CLK] = &cam_cc_ipe_pps_fast_ahb_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_PLL7] = &cam_cc_pll7.clkr,
+ [CAM_CC_PLL7_OUT_EVEN] = &cam_cc_pll7_out_even.clkr,
+ [CAM_CC_PLL8] = &cam_cc_pll8.clkr,
+ [CAM_CC_PLL8_OUT_EVEN] = &cam_cc_pll8_out_even.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK] = &cam_cc_qdss_debug_clk.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK_SRC] = &cam_cc_qdss_debug_clk_src.clkr,
+ [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
+ [CAM_CC_SBI_AHB_CLK] = &cam_cc_sbi_ahb_clk.clkr,
+ [CAM_CC_SBI_CLK] = &cam_cc_sbi_clk.clkr,
+ [CAM_CC_SFE_0_CLK] = &cam_cc_sfe_0_clk.clkr,
+ [CAM_CC_SFE_0_CLK_SRC] = &cam_cc_sfe_0_clk_src.clkr,
+ [CAM_CC_SFE_0_FAST_AHB_CLK] = &cam_cc_sfe_0_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_1_CLK] = &cam_cc_sfe_1_clk.clkr,
+ [CAM_CC_SFE_1_CLK_SRC] = &cam_cc_sfe_1_clk_src.clkr,
+ [CAM_CC_SFE_1_FAST_AHB_CLK] = &cam_cc_sfe_1_fast_ahb_clk.clkr,
+ [CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map cam_cc_sm8450_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x10000 },
+ [CAM_CC_ICP_BCR] = { 0x13104 },
+ [CAM_CC_IFE_0_BCR] = { 0x11000 },
+ [CAM_CC_IFE_1_BCR] = { 0x12000 },
+ [CAM_CC_IFE_2_BCR] = { 0x1204c },
+ [CAM_CC_IPE_0_BCR] = { 0x10074 },
+ [CAM_CC_QDSS_DEBUG_BCR] = { 0x131b8 },
+ [CAM_CC_SBI_BCR] = { 0x100cc },
+ [CAM_CC_SFE_0_BCR] = { 0x1304c },
+ [CAM_CC_SFE_1_BCR] = { 0x13094 },
+};
+
+static const struct regmap_config cam_cc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1601c,
+ .fast_io = true,
+};
+
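+/*
+ * titan_top_gdsc is forward-declared here because the IFE and SFE GDSCs
+ * below list it as their parent power domain; its definition follows them.
+ */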
+static struct gdsc titan_top_gdsc;
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x10078,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sbi_gdsc = {
+ .gdscr = 0x100d0,
+ .pd = {
+ .name = "sbi_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x11004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0x12004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0x12050,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sfe_0_gdsc = {
+ .gdscr = 0x13050,
+ .pd = {
+ .name = "sfe_0_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sfe_1_gdsc = {
+ .gdscr = 0x13098,
+ .pd = {
+ .name = "sfe_1_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0x131dc,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc *cam_cc_sm8450_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [SBI_GDSC] = &sbi_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [SFE_0_GDSC] = &sfe_0_gdsc,
+ [SFE_1_GDSC] = &sfe_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct qcom_cc_desc cam_cc_sm8450_desc = {
+ .config = &cam_cc_sm8450_regmap_config,
+ .clks = cam_cc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm8450_clocks),
+ .resets = cam_cc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm8450_resets),
+ .gdscs = cam_cc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm8450_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm8450_match_table[] = {
+ { .compatible = "qcom,sm8450-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table);
+
+static int cam_cc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm8450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+
+ return qcom_cc_really_probe(pdev, &cam_cc_sm8450_desc, regmap);
+}
+
+static struct platform_driver cam_cc_sm8450_driver = {
+ .probe = cam_cc_sm8450_probe,
+ .driver = {
+ .name = "camcc-sm8450",
+ .of_match_table = cam_cc_sm8450_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm8450_driver);
+
+MODULE_DESCRIPTION("QCOM CAMCC SM8450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 4406cf609aae..b42684703fbb 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -154,6 +154,18 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x30,
[PLL_OFF_TEST_CTL_U1] = 0x34,
},
+ [CLK_ALPHA_PLL_TYPE_RIVIAN_EVO] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_USER_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x24,
+ [PLL_OFF_TEST_CTL] = 0x28,
+ [PLL_OFF_TEST_CTL_U] = 0x2c,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -191,8 +203,10 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_5LPE_ENABLE_VOTE_RUN BIT(21)
/* LUCID EVO PLL specific settings and offsets */
+#define LUCID_EVO_PCAL_NOT_DONE BIT(8)
#define LUCID_EVO_ENABLE_VOTE_RUN BIT(25)
#define LUCID_EVO_PLL_L_VAL_MASK GENMASK(15, 0)
+#define LUCID_EVO_PLL_CAL_L_VAL_SHIFT 16
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
@@ -1439,7 +1453,7 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
/**
- * clk_lucid_pll_configure - configure the lucid pll
+ * clk_trion_pll_configure - configure the trion pll
*
* @pll: clk alpha pll
* @regmap: register map
@@ -1823,7 +1837,7 @@ const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.round_rate = clk_alpha_pll_round_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.enable = alpha_pll_lucid_5lpe_enable,
@@ -1832,14 +1846,14 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.recalc_rate = clk_trion_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_fixed_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
.round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
.set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
@@ -1992,7 +2006,33 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_zonda_pll_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_zonda_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_zonda_ops);
+
+void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 lval = config->l;
+
+ lval |= TRION_PLL_CAL_VAL << LUCID_EVO_PLL_CAL_L_VAL_SHIFT;
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), lval);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+ /* Set operation mode to STANDBY and de-assert the reset */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_evo_pll_configure);
static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
{
@@ -2079,6 +2119,31 @@ static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
}
+static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *p;
+ u32 val = 0;
+ int ret;
+
+ /* Return early if calibration is not needed. */
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (!(val & LUCID_EVO_PCAL_NOT_DONE))
+ return 0;
+
+ p = clk_hw_get_parent(hw);
+ if (!p)
+ return -EINVAL;
+
+ ret = alpha_pll_lucid_evo_enable(hw);
+ if (ret)
+ return ret;
+
+ alpha_pll_lucid_evo_disable(hw);
+
+ return 0;
+}
+
static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -2114,3 +2179,72 @@ const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
.set_rate = clk_lucid_evo_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
+
+const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
+ .prepare = alpha_pll_lucid_evo_prepare,
+ .enable = alpha_pll_lucid_evo_enable,
+ .disable = alpha_pll_lucid_evo_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = alpha_pll_lucid_evo_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_5lpe_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
+
+void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ regmap_update_bits(regmap, PLL_MODE(pll),
+ PLL_RESET_N | PLL_BYPASSNL | PLL_OUTCTRL,
+ PLL_RESET_N | PLL_BYPASSNL);
+}
+EXPORT_SYMBOL_GPL(clk_rivian_evo_pll_configure);
+
+static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l;
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+
+ return parent_rate * l;
+}
+
+static long clk_rivian_evo_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ unsigned long min_freq, max_freq;
+ u32 l;
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a, 0);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
+ .enable = alpha_pll_lucid_5lpe_enable,
+ .disable = alpha_pll_lucid_5lpe_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_rivian_evo_pll_recalc_rate,
+ .round_rate = clk_rivian_evo_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 6e9907deaf30..447efb82fe59 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -18,6 +18,7 @@ enum {
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
+ CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -152,9 +153,14 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
+
+extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
+extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
+#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -168,6 +174,9 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
-
+void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
index e847d586a73a..7dd17c184b69 100644
--- a/drivers/clk/qcom/clk-hfpll.c
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -72,13 +72,16 @@ static void __clk_hfpll_enable(struct clk_hw *hw)
regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
/* Wait for PLL to lock. */
- if (hd->status_reg) {
- do {
- regmap_read(regmap, hd->status_reg, &val);
- } while (!(val & BIT(hd->lock_bit)));
- } else {
+ if (hd->status_reg)
+ /*
+ * Busy wait. This should never time out, but we add a timeout
+ * to prevent any sort of stall.
+ */
+ regmap_read_poll_timeout(regmap, hd->status_reg, val,
+ val & BIT(hd->lock_bit), 0,
+ 100 * USEC_PER_MSEC);
+ else
udelay(60);
- }
/* Enable PLL output. */
regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
index 59f1af415b58..45da736bd5f4 100644
--- a/drivers/clk/qcom/clk-krait.c
+++ b/drivers/clk/qcom/clk-krait.c
@@ -18,13 +18,23 @@
static DEFINE_SPINLOCK(krait_clock_reg_lock);
#define LPL_SHIFT 8
+#define SECCLKAGD BIT(4)
+
static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
{
unsigned long flags;
u32 regval;
spin_lock_irqsave(&krait_clock_reg_lock, flags);
+
regval = krait_get_l2_indirect_reg(mux->offset);
+
+ /* apq/ipq8064 Errata: disable sec_src clock gating during switch. */
+ if (mux->disable_sec_src_gating) {
+ regval |= SECCLKAGD;
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ }
+
regval &= ~(mux->mask << mux->shift);
regval |= (sel & mux->mask) << mux->shift;
if (mux->lpl) {
@@ -32,11 +42,22 @@ static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
}
krait_set_l2_indirect_reg(mux->offset, regval);
- spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* apq/ipq8064 Errata: re-enable sec_src clock gating. */
+ if (mux->disable_sec_src_gating) {
+ regval &= ~SECCLKAGD;
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ }
/* Wait for switch to complete. */
mb();
udelay(1);
+
+ /*
+ * Unlock only now, to make sure the mux register is not
+ * modified while the switch to the new parent is in progress.
+ */
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
}
static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
index 9120bd2f5297..f930538c539e 100644
--- a/drivers/clk/qcom/clk-krait.h
+++ b/drivers/clk/qcom/clk-krait.h
@@ -15,6 +15,7 @@ struct krait_mux_clk {
u8 safe_sel;
u8 old_index;
bool reparent;
+ bool disable_sec_src_gating;
struct clk_hw hw;
struct notifier_block clk_nb;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8e5dce09d162..28019edd2a50 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -13,6 +13,7 @@
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/slab.h>
#include <asm/div64.h>
@@ -437,7 +438,7 @@ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- u32 notn_m, n, m, d, not2d, mask, duty_per;
+ u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
int ret;
/* Duty-cycle cannot be modified for non-MND RCGs */
@@ -448,6 +449,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+ regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+
+ /* Duty-cycle cannot be modified if MND divider is in bypass mode. */
+ if (!(cfg & CFG_MODE_MASK))
+ return -EINVAL;
n = (~(notn_m) + m) & mask;
@@ -456,9 +462,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
/* Calculate 2d value */
d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);
- /* Check bit widths of 2d. If D is too big reduce duty cycle. */
- if (d > mask)
- d = mask;
+ /*
+ * Check bit widths of 2d. If D is too big reduce duty cycle.
+ * Also make sure it is never zero.
+ */
+ d = clamp_val(d, 1, mask);
if ((d / 2) > (n - m))
d = (n - m) * 2;
diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.c b/drivers/clk/qcom/clk-regmap-phy-mux.c
new file mode 100644
index 000000000000..7b7243b7107d
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-phy-mux.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap.h"
+#include "clk-regmap-phy-mux.h"
+
+#define PHY_MUX_MASK GENMASK(1, 0)
+#define PHY_MUX_PHY_SRC 0
+#define PHY_MUX_REF_SRC 2
+
+static inline struct clk_regmap_phy_mux *to_clk_regmap_phy_mux(struct clk_regmap *clkr)
+{
+ return container_of(clkr, struct clk_regmap_phy_mux, clkr);
+}
+
+static int phy_mux_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+ unsigned int val;
+
+ regmap_read(clkr->regmap, phy_mux->reg, &val);
+ val = FIELD_GET(PHY_MUX_MASK, val);
+
+ WARN_ON(val != PHY_MUX_PHY_SRC && val != PHY_MUX_REF_SRC);
+
+ return val == PHY_MUX_PHY_SRC;
+}
+
+static int phy_mux_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+
+ return regmap_update_bits(clkr->regmap, phy_mux->reg,
+ PHY_MUX_MASK,
+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_PHY_SRC));
+}
+
+static void phy_mux_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+
+ regmap_update_bits(clkr->regmap, phy_mux->reg,
+ PHY_MUX_MASK,
+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_REF_SRC));
+}
+
+const struct clk_ops clk_regmap_phy_mux_ops = {
+ .enable = phy_mux_enable,
+ .disable = phy_mux_disable,
+ .is_enabled = phy_mux_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_phy_mux_ops);
diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.h b/drivers/clk/qcom/clk-regmap-phy-mux.h
new file mode 100644
index 000000000000..614dd384695c
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-phy-mux.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_PHY_MUX_H__
+#define __QCOM_CLK_REGMAP_PHY_MUX_H__
+
+#include "clk-regmap.h"
+
+/*
+ * A clock implementation for PHY pipe and symbol clock muxes.
+ *
+ * If the clock is running off the from-PHY source, report it as enabled;
+ * otherwise (i.e. when it uses the reference source) report it as disabled.
+ *
+ * This way the PHY will disable the pipe clock before turning off the GDSC,
+ * which in turn leads to disabling the corresponding pipe_clk_src (and thus
+ * to it being parked to a safe, reference clock source). And vice versa, after
+ * enabling the GDSC the PHY will enable the pipe clock, which would cause
+ * pipe_clk_src to be switched from a safe source to the working one.
+ *
+ * For some platforms this should be used for the UFS symbol_clk_src clocks
+ * too.
+ */
+struct clk_regmap_phy_mux {
+ u32 reg;
+ struct clk_regmap clkr;
+};
+
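+/*
+ * Minimal usage sketch (the register offset and clock names below are
+ * hypothetical, not part of this patch): a GCC driver would describe a
+ * PCIe pipe clock mux as
+ *
+ * static struct clk_regmap_phy_mux pcie_0_pipe_clk_src = {
+ * .reg = 0x6b054,
+ * .clkr = {
+ * .hw.init = &(struct clk_init_data) {
+ * .name = "pcie_0_pipe_clk_src",
+ * .parent_data = &(const struct clk_parent_data) {
+ * .fw_name = "pipe",
+ * },
+ * .num_parents = 1,
+ * .ops = &clk_regmap_phy_mux_ops,
+ * },
+ * },
+ * };
+ *
+ * and register its .clkr in the driver's clk_regmap table as usual.
+ */
+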
+extern const struct clk_ops clk_regmap_phy_mux_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index a18811c38018..747c473b0b5e 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -23,6 +23,14 @@
#define QCOM_RPM_SCALING_ENABLE_ID 0x2
#define QCOM_RPM_XO_MODE_ON 0x2
+static const struct clk_parent_data gcc_pxo[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+};
+
+static const struct clk_parent_data gcc_cxo[] = {
+ { .fw_name = "cxo", .name = "cxo_board" },
+};
+
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
static struct clk_rpm _platform##_##_active; \
static struct clk_rpm _platform##_##_name = { \
@@ -32,8 +40,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}; \
static struct clk_rpm _platform##_##_active = { \
@@ -44,8 +52,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_ops, \
.name = #_active, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}
@@ -56,8 +64,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_xo_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "cxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_cxo, \
+ .num_parents = ARRAY_SIZE(gcc_cxo), \
}, \
}
@@ -68,8 +76,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_fixed_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "pxo" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index aed907982344..c07cab6905cb 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -274,6 +274,11 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
cmd.addr = c->res_addr;
cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
+ /*
+ * Send only an ACTIVE_ONLY state request. RPMh continues to
+ * use the active state when we're in sleep/wake state as long
+ * as the sleep/wake state has never been set.
+ */
ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
if (ret) {
dev_err(c->dev, "set active state of %s failed: (%d)\n",
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index db9379634fb2..709076f0f9d7 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -43,6 +43,10 @@ static struct pll_vco vco_table[] = {
{ 249600000, 2000000000, 0 },
};
+static struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1750000000, 0 },
+};
+
static struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x47,
.alpha = 0xE000,
@@ -1134,7 +1138,6 @@ static struct gdsc mdss_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
- .supply = "mmcx",
};
static struct clk_regmap *disp_cc_sm8250_clocks[] = {
@@ -1228,6 +1231,7 @@ static const struct of_device_id disp_cc_sm8250_match_table[] = {
{ .compatible = "qcom,sc8180x-dispcc" },
{ .compatible = "qcom,sm8150-dispcc" },
{ .compatible = "qcom,sm8250-dispcc" },
+ { .compatible = "qcom,sm8350-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8250_match_table);
@@ -1258,7 +1262,7 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- /* note: trion == lucid, except for the prepare() op */
+ /* Apply differences for SM8150 and SM8350 */
BUILD_BUG_ON(CLK_ALPHA_PLL_TYPE_TRION != CLK_ALPHA_PLL_TYPE_LUCID);
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sc8180x-dispcc") ||
of_device_is_compatible(pdev->dev.of_node, "qcom,sm8150-dispcc")) {
@@ -1270,6 +1274,62 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
disp_cc_pll1_config.config_ctl_hi1_val = 0x00000024;
disp_cc_pll1_config.user_ctl_hi1_val = 0x000000D0;
disp_cc_pll1_init.ops = &clk_alpha_pll_trion_ops;
+ } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) {
+ static struct clk_rcg2 * const rcgs[] = {
+ &disp_cc_mdss_byte0_clk_src,
+ &disp_cc_mdss_byte1_clk_src,
+ &disp_cc_mdss_dp_aux1_clk_src,
+ &disp_cc_mdss_dp_aux_clk_src,
+ &disp_cc_mdss_dp_link1_clk_src,
+ &disp_cc_mdss_dp_link_clk_src,
+ &disp_cc_mdss_dp_pixel1_clk_src,
+ &disp_cc_mdss_dp_pixel2_clk_src,
+ &disp_cc_mdss_dp_pixel_clk_src,
+ &disp_cc_mdss_esc0_clk_src,
+ &disp_cc_mdss_mdp_clk_src,
+ &disp_cc_mdss_pclk0_clk_src,
+ &disp_cc_mdss_pclk1_clk_src,
+ &disp_cc_mdss_rot_clk_src,
+ &disp_cc_mdss_vsync_clk_src,
+ };
+ static struct clk_regmap_div * const divs[] = {
+ &disp_cc_mdss_byte0_div_clk_src,
+ &disp_cc_mdss_byte1_div_clk_src,
+ &disp_cc_mdss_dp_link1_div_clk_src,
+ &disp_cc_mdss_dp_link_div_clk_src,
+ };
+ unsigned int i;
+ static bool offset_applied;
+
+ /*
+ * note: trion == lucid, except for the prepare() op
+ * only apply the offsets once (in case of deferred probe)
+ */
+ if (!offset_applied) {
+ for (i = 0; i < ARRAY_SIZE(rcgs); i++)
+ rcgs[i]->cmd_rcgr -= 4;
+
+ for (i = 0; i < ARRAY_SIZE(divs); i++) {
+ divs[i]->reg -= 4;
+ divs[i]->width = 4;
+ }
+
+ disp_cc_mdss_ahb_clk.halt_reg -= 4;
+ disp_cc_mdss_ahb_clk.clkr.enable_reg -= 4;
+
+ offset_applied = true;
+ }
+
+ disp_cc_mdss_ahb_clk_src.cmd_rcgr = 0x22a0;
+
+ disp_cc_pll0_config.config_ctl_hi1_val = 0x2a9a699c;
+ disp_cc_pll0_config.test_ctl_hi1_val = 0x01800000;
+ disp_cc_pll0_init.ops = &clk_alpha_pll_lucid_5lpe_ops;
+ disp_cc_pll0.vco_table = lucid_5lpe_vco;
+ disp_cc_pll1_config.config_ctl_hi1_val = 0x2a9a699c;
+ disp_cc_pll1_config.test_ctl_hi1_val = 0x01800000;
+ disp_cc_pll1_init.ops = &clk_alpha_pll_lucid_5lpe_ops;
+ disp_cc_pll1.vco_table = lucid_5lpe_vco;
}
clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 541016db3c4b..42d185fe19c8 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -22,6 +22,7 @@
#include "clk-alpha-pll.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "gdsc.h"
#include "reset.h"
enum {
@@ -662,6 +663,7 @@ static struct clk_branch gcc_sleep_clk_src = {
},
.num_parents = 1,
.ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -1788,8 +1790,10 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_RX, 5, 0, 0),
F(78125000, P_UNIPHY1_RX, 4, 0, 0),
F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_RX, 1, 0, 0),
F(156250000, P_UNIPHY1_RX, 2, 0, 0),
F(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
@@ -1828,8 +1832,10 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_TX, 5, 0, 0),
F(78125000, P_UNIPHY1_TX, 4, 0, 0),
F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_TX, 1, 0, 0),
F(156250000, P_UNIPHY1_TX, 2, 0, 0),
F(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
@@ -1867,8 +1873,10 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_RX, 5, 0, 0),
F(25000000, P_UNIPHY2_RX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_RX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_RX, 1, 0, 0),
F(125000000, P_UNIPHY2_RX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_RX, 2, 0, 0),
F(312500000, P_UNIPHY2_RX, 1, 0, 0),
@@ -1907,8 +1915,10 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_TX, 5, 0, 0),
F(25000000, P_UNIPHY2_TX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_TX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_TX, 1, 0, 0),
F(125000000, P_UNIPHY2_TX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_TX, 2, 0, 0),
F(312500000, P_UNIPHY2_TX, 1, 0, 0),
@@ -3174,6 +3184,24 @@ static struct clk_branch gcc_nss_ptp_ref_clk = {
},
};
+static struct clk_branch gcc_crypto_ppe_clk = {
+ .halt_reg = 0x68310,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x68310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ppe_clk",
+ .parent_names = (const char *[]){
+ "nss_ppe_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_nssnoc_ce_apb_clk = {
.halt_reg = 0x6830c,
.clkr = {
@@ -3346,6 +3374,7 @@ static struct clk_branch gcc_nssnoc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi0_ahb_clk = {
.halt_reg = 0x6820c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6820c,
.enable_mask = BIT(0),
@@ -3363,6 +3392,7 @@ static struct clk_branch gcc_ubi0_ahb_clk = {
static struct clk_branch gcc_ubi0_axi_clk = {
.halt_reg = 0x68200,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68200,
.enable_mask = BIT(0),
@@ -3380,6 +3410,7 @@ static struct clk_branch gcc_ubi0_axi_clk = {
static struct clk_branch gcc_ubi0_nc_axi_clk = {
.halt_reg = 0x68204,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68204,
.enable_mask = BIT(0),
@@ -3397,6 +3428,7 @@ static struct clk_branch gcc_ubi0_nc_axi_clk = {
static struct clk_branch gcc_ubi0_core_clk = {
.halt_reg = 0x68210,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68210,
.enable_mask = BIT(0),
@@ -3414,6 +3446,7 @@ static struct clk_branch gcc_ubi0_core_clk = {
static struct clk_branch gcc_ubi0_mpt_clk = {
.halt_reg = 0x68208,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68208,
.enable_mask = BIT(0),
@@ -3431,6 +3464,7 @@ static struct clk_branch gcc_ubi0_mpt_clk = {
static struct clk_branch gcc_ubi1_ahb_clk = {
.halt_reg = 0x6822c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6822c,
.enable_mask = BIT(0),
@@ -3448,6 +3482,7 @@ static struct clk_branch gcc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi1_axi_clk = {
.halt_reg = 0x68220,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68220,
.enable_mask = BIT(0),
@@ -3465,6 +3500,7 @@ static struct clk_branch gcc_ubi1_axi_clk = {
static struct clk_branch gcc_ubi1_nc_axi_clk = {
.halt_reg = 0x68224,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68224,
.enable_mask = BIT(0),
@@ -3482,6 +3518,7 @@ static struct clk_branch gcc_ubi1_nc_axi_clk = {
static struct clk_branch gcc_ubi1_core_clk = {
.halt_reg = 0x68230,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68230,
.enable_mask = BIT(0),
@@ -3499,6 +3536,7 @@ static struct clk_branch gcc_ubi1_core_clk = {
static struct clk_branch gcc_ubi1_mpt_clk = {
.halt_reg = 0x68228,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68228,
.enable_mask = BIT(0),
@@ -4371,6 +4409,49 @@ static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
},
};
+static struct gdsc usb0_gdsc = {
+ .gdscr = 0x3e078,
+ .pd = {
+ .name = "usb0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb1_gdsc = {
+ .gdscr = 0x3f078,
+ .pd = {
+ .name = "usb1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static const struct alpha_pll_config ubi32_pll_config = {
+ .l = 0x4e,
+ .config_ctl_val = 0x200d4aa8,
+ .config_ctl_hi_val = 0x3c2,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = GENMASK(9, 8),
+};
+
+static const struct alpha_pll_config nss_crypto_pll_config = {
+ .l = 0x3e,
+ .alpha = 0x0,
+ .alpha_hi = 0x80,
+ .config_ctl_val = 0x4001055b,
+ .main_output_mask = BIT(0),
+ .pre_div_val = 0x0,
+ .pre_div_mask = GENMASK(14, 12),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .vco_mask = GENMASK(21, 20),
+ .vco_val = 0x0,
+ .alpha_en_mask = BIT(24),
+};
+
static struct clk_hw *gcc_ipq8074_hws[] = {
&gpll0_out_main_div2.hw,
&gpll6_out_main_div2.hw,
@@ -4609,6 +4690,7 @@ static struct clk_regmap *gcc_ipq8074_clks[] = {
[GCC_PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr,
[GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
[GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+ [GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
};
static const struct qcom_reset_map gcc_ipq8074_resets[] = {
@@ -4746,6 +4828,11 @@ static const struct qcom_reset_map gcc_ipq8074_resets[] = {
[GCC_PCIE1_AXI_MASTER_STICKY_ARES] = { 0x76040, 6 },
};
+static struct gdsc *gcc_ipq8074_gdscs[] = {
+ [USB0_GDSC] = &usb0_gdsc,
+ [USB1_GDSC] = &usb1_gdsc,
+};
+
static const struct of_device_id gcc_ipq8074_match_table[] = {
{ .compatible = "qcom,gcc-ipq8074" },
{ }
@@ -4768,11 +4855,26 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = {
.num_resets = ARRAY_SIZE(gcc_ipq8074_resets),
.clk_hws = gcc_ipq8074_hws,
.num_clk_hws = ARRAY_SIZE(gcc_ipq8074_hws),
+ .gdscs = gcc_ipq8074_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_ipq8074_gdscs),
};
static int gcc_ipq8074_probe(struct platform_device *pdev)
{
- return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_ipq8074_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* SW Workaround for UBI32 Huayra PLL */
+ regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26));
+
+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+ clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+ &nss_crypto_pll_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap);
}
static struct platform_driver gcc_ipq8074_driver = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 17e4a5a2a9fd..9a46794f6eb8 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -765,7 +765,20 @@ static struct clk_rcg2 cci_clk_src = {
},
};
+/*
+ * This is a frequency table for "General Purpose" clocks.
+ * These clocks can be muxed to the SoC pins and may be used by
+ * external devices. They're often used as a PWM source.
+ *
+ * See comment at ftbl_gcc_gp1_3_clk.
+ */
static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(100000, P_XO, 16, 1, 12),
+ F(500000, P_GPLL0, 16, 1, 100),
+ F(1000000, P_GPLL0, 16, 1, 50),
+ F(2500000, P_GPLL0, 16, 1, 20),
+ F(5000000, P_GPLL0, 16, 1, 10),
F(100000000, P_GPLL0, 8, 0, 0),
F(200000000, P_GPLL0, 4, 0, 0),
{ }
@@ -927,7 +940,29 @@ static struct clk_rcg2 crypto_clk_src = {
},
};
+/*
+ * This is a frequency table for "General Purpose" clocks.
+ * These clocks can be muxed to the SoC pins and may be used by
+ * external devices. They're often used as a PWM source.
+ *
+ * Please note that the MND divider must be enabled (M != N) for
+ * duty-cycle control to be possible. Also, since the D register is
+ * configured with a value multiplied by 2, the duty cycle is calculated
+ * as
+ *
+ *   DutyCycle = ((2 * D) % 2^W) / (2 * (N % 2^W))
+ *
+ * where W = .mnd_width. N must be no more than half the maximum value
+ * the register can hold; otherwise duty-cycle control would be limited
+ * (e.g. for an 8-bit MND, N should be less than 128).
+ */
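+/*
+ * Worked example (illustrative, assuming the 8-bit MND width these GP
+ * RCGs use and the 10 kHz entry below, M = 1, N = 120): a request for a
+ * 25% duty cycle makes clk_rcg2_set_duty_cycle() program
+ * 2*D = DIV_ROUND_CLOSEST(120 * 25 * 2, 100) = 60, i.e. D = 30, so
+ * DutyCycle = (2 * 30) / (2 * 120) = 25%. Keeping N = 120 below 128
+ * also guarantees the (2 * D) % 2^W term never wraps.
+ */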
static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(100000, P_XO, 16, 1, 12),
+ F(500000, P_GPLL0, 16, 1, 100),
+ F(1000000, P_GPLL0, 16, 1, 50),
+ F(2500000, P_GPLL0, 16, 1, 20),
+ F(5000000, P_GPLL0, 16, 1, 10),
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 39ebb443ae3d..8e2d9fb98ad5 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -632,7 +632,7 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
};
static struct clk_rcg2 bimc_ddr_clk_src = {
- .cmd_rcgr = 0x32004,
+ .cmd_rcgr = 0x32024,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
@@ -644,6 +644,18 @@ static struct clk_rcg2 bimc_ddr_clk_src = {
},
};
+static struct clk_rcg2 system_mm_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6a_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_mm_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = {
F(40000000, P_GPLL0, 10, 1, 2),
F(80000000, P_GPLL0, 10, 0, 0),
@@ -1002,7 +1014,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = {
- F(19200000, P_XO, 1, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 1, 3, 64),
{ }
};
@@ -1142,6 +1155,9 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = {
F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
F(320000000, P_GPLL0, 2.5, 0, 0),
F(465000000, P_GPLL2, 2, 0, 0),
{ }
@@ -1290,6 +1306,8 @@ static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = {
F(50000000, P_GPLL0_AUX, 16, 0, 0),
F(80000000, P_GPLL0_AUX, 10, 0, 0),
F(100000000, P_GPLL0_AUX, 8, 0, 0),
+ F(145500000, P_GPLL0_AUX, 5.5, 0, 0),
+ F(153600000, P_GPLL0, 4, 0, 0),
F(160000000, P_GPLL0_AUX, 5, 0, 0),
F(177780000, P_GPLL0_AUX, 4.5, 0, 0),
F(200000000, P_GPLL0_AUX, 4, 0, 0),
@@ -1462,7 +1480,9 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
};
static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(57140000, P_GPLL0, 14, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
{ }
};
@@ -1823,9 +1843,9 @@ static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
};
static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
- F(100000000, P_GPLL0, 8, 0, 0),
- F(160000000, P_GPLL0, 5, 0, 0),
- F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
{ }
};
@@ -2441,7 +2461,7 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2645,7 +2665,7 @@ static struct clk_branch gcc_camss_vfe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2801,7 +2821,7 @@ static struct clk_branch gcc_mdss_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3193,7 +3213,7 @@ static struct clk_branch gcc_mdp_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3211,7 +3231,7 @@ static struct clk_branch gcc_venus_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_venus_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3229,7 +3249,7 @@ static struct clk_branch gcc_vfe_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_vfe_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3247,7 +3267,7 @@ static struct clk_branch gcc_jpeg_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_jpeg_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3484,7 +3504,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3623,6 +3643,7 @@ static struct clk_regmap *gcc_msm8939_clocks[] = {
[GPLL2_VOTE] = &gpll2_vote,
[PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
[SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [SYSTEM_MM_NOC_BFDCD_CLK_SRC] = &system_mm_noc_bfdcd_clk_src.clkr,
[CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr,
[APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 051745ef99c8..a6e13b91e4c8 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3641,6 +3641,9 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
hfpll_l2.d = &hfpll_l2_8064_data;
}
+ if (of_get_available_child_count(pdev->dev.of_node) != 0)
+ return devm_of_platform_populate(&pdev->dev);
+
tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1,
NULL, 0);
if (IS_ERR(tsens))
@@ -3655,7 +3658,8 @@ static int gcc_msm8960_remove(struct platform_device *pdev)
{
struct platform_device *tsens = platform_get_drvdata(pdev);
- platform_device_unregister(tsens);
+ if (tsens)
+ platform_device_unregister(tsens);
return 0;
}
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 6b702cdacbf2..0f52c48e89d8 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -52,7 +52,9 @@ static struct clk_alpha_pll_postdiv gpll0 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
- .parent_names = (const char *[]) { "gpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
@@ -81,7 +83,9 @@ static struct clk_alpha_pll_postdiv gpll4 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
- .parent_names = (const char *[]) { "gpll4_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll4_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 423627d49719..7ff64d4d5920 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -17,6 +17,7 @@
#include "clk-rcg.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "common.h"
#include "gdsc.h"
#include "reset.h"
@@ -255,26 +256,6 @@ static const struct clk_parent_data gcc_parent_data_5[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct parent_map gcc_parent_map_6[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_6[] = {
- { .fw_name = "pcie_0_pipe_clk", .name = "pcie_0_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
-static const struct parent_map gcc_parent_map_7[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_7[] = {
- { .fw_name = "pcie_1_pipe_clk", .name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
static const struct parent_map gcc_parent_map_8[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -369,32 +350,32 @@ static const struct clk_parent_data gcc_parent_data_15[] = {
{ .hw = &gcc_mss_gpll0_main_div_clk_src.clkr.hw },
};
-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x6b054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_6,
- .num_parents = ARRAY_SIZE(gcc_parent_data_6),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ .name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x8d054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_7,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_7,
- .num_parents = ARRAY_SIZE(gcc_parent_data_7),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ .name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
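
Note on the conversions above: clk_regmap_phy_mux models a PCIe PIPE clock input as a gate rather than a general mux. While "enabled" it selects the PHY's pipe clock; while "disabled" it parks on the always-running TCXO reference, so the mux never runs from a dead PHY output. A minimal sketch of the idea, assuming the two-bit field layout implied by the parent maps removed above (0 = PHY pipe source, 2 = bi_tcxo); the real helper lives in drivers/clk/qcom/clk-regmap-phy-mux.c:

/* Sketch only: the PIPE "mux" becomes a gate over a 2-bit regmap field. */
#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>

#include "clk-regmap.h"
#include "clk-regmap-phy-mux.h"

#define PHY_MUX_MASK		GENMASK(1, 0)
#define PHY_MUX_PHY_SRC		0	/* pipe clock driven by the PCIe PHY */
#define PHY_MUX_REF_SRC		2	/* safe, always-running bi_tcxo */

static inline struct clk_regmap_phy_mux *to_phy_mux(struct clk_regmap *clkr)
{
	return container_of(clkr, struct clk_regmap_phy_mux, clkr);
}

static int phy_mux_enable(struct clk_hw *hw)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_regmap_phy_mux *phy_mux = to_phy_mux(clkr);

	/* PHY is up: switch the GCC input over to the PHY's pipe clock. */
	return regmap_update_bits(clkr->regmap, phy_mux->reg, PHY_MUX_MASK,
				  FIELD_PREP(PHY_MUX_MASK, PHY_MUX_PHY_SRC));
}

static void phy_mux_disable(struct clk_hw *hw)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_regmap_phy_mux *phy_mux = to_phy_mux(clkr);

	/* PHY going down: park on TCXO so the mux keeps a live input. */
	regmap_update_bits(clkr->regmap, phy_mux->reg, PHY_MUX_MASK,
			   FIELD_PREP(PHY_MUX_MASK, PHY_MUX_REF_SRC));
}

This is why each converted node needs only a single parent and no parent_map: parent selection is tied to PHY state, not a rate decision for the clock framework to make.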
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index 4b894442fdf5..a2f3ffcc5849 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -20,6 +20,7 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "common.h"
#include "gdsc.h"
#include "reset.h"
@@ -82,11 +83,6 @@ enum {
P_GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC,
P_GCC_USB4_PHY_PIPEGMUX_CLK_SRC,
P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC,
- P_PCIE_2A_PIPE_CLK,
- P_PCIE_2B_PIPE_CLK,
- P_PCIE_3A_PIPE_CLK,
- P_PCIE_3B_PIPE_CLK,
- P_PCIE_4_PIPE_CLK,
P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
P_QUSB4PHY_GCC_USB4_RX0_CLK,
@@ -351,56 +347,6 @@ static const struct clk_parent_data gcc_parent_data_9[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct parent_map gcc_parent_map_10[] = {
- { P_PCIE_2A_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_10[] = {
- { .index = DT_PCIE_2A_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_11[] = {
- { P_PCIE_2B_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_11[] = {
- { .index = DT_PCIE_2B_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_12[] = {
- { P_PCIE_3A_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_12[] = {
- { .index = DT_PCIE_3A_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_13[] = {
- { P_PCIE_3B_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_13[] = {
- { .index = DT_PCIE_3B_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_14[] = {
- { P_PCIE_4_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_14[] = {
- { .index = DT_PCIE_4_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
static const struct parent_map gcc_parent_map_15[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -741,77 +687,72 @@ static const struct clk_parent_data gcc_parent_data_41[] = {
{ .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK },
};
-static struct clk_regmap_mux gcc_pcie_2a_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_2a_pipe_clk_src = {
.reg = 0x9d05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_10,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2a_pipe_clk_src",
- .parent_data = gcc_parent_data_10,
- .num_parents = ARRAY_SIZE(gcc_parent_data_10),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_2A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_2b_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_2b_pipe_clk_src = {
.reg = 0x9e05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_11,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2b_pipe_clk_src",
- .parent_data = gcc_parent_data_11,
- .num_parents = ARRAY_SIZE(gcc_parent_data_11),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_2B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_3a_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_3a_pipe_clk_src = {
.reg = 0xa005c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_12,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_3a_pipe_clk_src",
- .parent_data = gcc_parent_data_12,
- .num_parents = ARRAY_SIZE(gcc_parent_data_12),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_3b_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_3b_pipe_clk_src = {
.reg = 0xa205c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_13,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_3b_pipe_clk_src",
- .parent_data = gcc_parent_data_13,
- .num_parents = ARRAY_SIZE(gcc_parent_data_13),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_4_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
.reg = 0x6b05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_14,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_4_pipe_clk_src",
- .parent_data = gcc_parent_data_14,
- .num_parents = ARRAY_SIZE(gcc_parent_data_14),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
@@ -6807,58 +6748,79 @@ static struct clk_branch gcc_video_vcodec_throttle_clk = {
static struct gdsc pcie_0_tunnel_gdsc = {
.gdscr = 0xa4004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(0),
.pd = {
.name = "pcie_0_tunnel_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_1_tunnel_gdsc = {
.gdscr = 0x8d004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(1),
.pd = {
.name = "pcie_1_tunnel_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(2),
.pd = {
.name = "pcie_2a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_2b_gdsc = {
.gdscr = 0x9e004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(3),
.pd = {
.name = "pcie_2b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_3a_gdsc = {
.gdscr = 0xa0004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(4),
.pd = {
.name = "pcie_3a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_3b_gdsc = {
.gdscr = 0xa2004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(5),
.pd = {
.name = "pcie_3b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_4_gdsc = {
.gdscr = 0x6b004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(6),
.pd = {
.name = "pcie_4_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc ufs_card_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index a4f7fba70393..69412400efa4 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2558,7 +2558,7 @@ static int gcc_sm6350_probe(struct platform_device *pdev)
if (ret)
return ret;
- return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);;
+ return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);
}
static struct platform_driver gcc_sm6350_driver = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 593a195467ff..666efa5ff978 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -17,6 +17,7 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "gdsc.h"
#include "reset.h"
@@ -26,9 +27,7 @@ enum {
P_GCC_GPLL0_OUT_MAIN,
P_GCC_GPLL4_OUT_MAIN,
P_GCC_GPLL9_OUT_MAIN,
- P_PCIE_0_PIPE_CLK,
P_PCIE_1_PHY_AUX_CLK,
- P_PCIE_1_PIPE_CLK,
P_SLEEP_CLK,
P_UFS_PHY_RX_SYMBOL_0_CLK,
P_UFS_PHY_RX_SYMBOL_1_CLK,
@@ -153,16 +152,6 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
};
-static const struct parent_map gcc_parent_map_4[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_4[] = {
- { .fw_name = "pcie_0_pipe_clk", },
- { .fw_name = "bi_tcxo", },
-};
-
static const struct parent_map gcc_parent_map_5[] = {
{ P_PCIE_1_PHY_AUX_CLK, 0 },
{ P_BI_TCXO, 2 },
@@ -173,16 +162,6 @@ static const struct clk_parent_data gcc_parent_data_5[] = {
{ .fw_name = "bi_tcxo" },
};
-static const struct parent_map gcc_parent_map_6[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_6[] = {
- { .fw_name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
static const struct parent_map gcc_parent_map_7[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -239,17 +218,16 @@ static const struct clk_parent_data gcc_parent_data_11[] = {
{ .fw_name = "bi_tcxo" },
};
-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x7b060,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_4,
- .num_parents = ARRAY_SIZE(gcc_parent_data_4),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
@@ -269,17 +247,16 @@ static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
},
};
-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x9d064,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_6,
- .num_parents = ARRAY_SIZE(gcc_parent_data_6),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 44520efc6c72..d3244006c661 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -132,10 +132,29 @@ static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
return -ETIMEDOUT;
}
+static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
+{
+ u32 reg, mask;
+ int ret;
+
+ if (sc->collapse_mask) {
+ reg = sc->collapse_ctrl;
+ mask = sc->collapse_mask;
+ } else {
+ reg = sc->gdscr;
+ mask = SW_COLLAPSE_MASK;
+ }
+
+ ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
int ret;
- u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;
if (status == GDSC_ON && sc->rsupply) {
ret = regulator_enable(sc->rsupply);
@@ -143,9 +162,7 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
return ret;
}
- ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
- if (ret)
- return ret;
+ ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);
/* If disabling votable gdscs, don't poll on status */
if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
@@ -420,13 +437,20 @@ static int gdsc_init(struct gdsc *sc)
return ret;
}
+ /* ...and the power-domain */
+ ret = gdsc_pm_runtime_get(sc);
+ if (ret) {
+ if (sc->rsupply)
+ regulator_disable(sc->rsupply);
+ return ret;
+ }
+
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
*/
if (sc->flags & VOTABLE) {
- ret = regmap_update_bits(sc->regmap, sc->gdscr,
- SW_COLLAPSE_MASK, val);
+ ret = gdsc_update_collapse_bit(sc, false);
if (ret)
return ret;
}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index ad313d7210bd..5de48c9439b2 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -18,6 +18,8 @@ struct reset_controller_dev;
* @pd: generic power domain
* @regmap: regmap for MMIO accesses
* @gdscr: gdsc control register
+ * @collapse_ctrl: APCS collapse-vote register
+ * @collapse_mask: APCS collapse-vote mask
* @gds_hw_ctrl: gds_hw_ctrl register
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
@@ -35,6 +37,8 @@ struct gdsc {
struct generic_pm_domain *parent;
struct regmap *regmap;
unsigned int gdscr;
+ unsigned int collapse_ctrl;
+ unsigned int collapse_mask;
unsigned int gds_hw_ctrl;
unsigned int clamp_io_ctrl;
unsigned int *cxcs;
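
Taken together, the gdsc.c and gdsc.h changes let a GDSC vote for power through a shared APCS collapse-vote register instead of its own SW_COLLAPSE bit: when collapse_mask is non-zero, gdsc_update_collapse_bit() writes that bit in collapse_ctrl, and otherwise it falls back to the old per-domain behaviour. A hypothetical domain using the new fields might look like this (the offsets and vote bit are illustrative, not taken from a real SoC):

/* Hypothetical example; real offsets come from the SoC's GCC register map. */
static struct gdsc example_pcie_gdsc = {
	.gdscr		= 0x1b004,	/* per-domain register, still used for status polling */
	.collapse_ctrl	= 0x52128,	/* shared APCS collapse-vote register */
	.collapse_mask	= BIT(7),	/* this domain's vote bit */
	.pd = {
		.name = "example_pcie_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = VOTABLE,		/* other masters may hold the domain on */
};

Domains that leave collapse_mask at zero keep toggling SW_COLLAPSE in gdscr, so existing providers are unaffected.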
diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
new file mode 100644
index 000000000000..5367ce654ac9
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8350.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm8350.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-divider.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1750000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x18,
+ .alpha = 0x6000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static const struct clk_parent_data gpu_cc_parent = {
+ .fw_name = "bi_tcxo",
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &gpu_cc_parent,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &gpu_cc_parent,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(150000000, P_GPLL0_OUT_MAIN_DIV, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x117c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x11c0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x11bc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cb_clk = {
+ .halt_reg = 0x1170,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1170,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_at_clk = {
+ .halt_reg = 0x1080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_trig_clk = {
+ .halt_reg = 0x1094,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_tsctr_clk = {
+ .halt_reg = 0x1084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x120c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x120c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_qdss_tsctr_clk = {
+ .halt_reg = 0x105c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x105c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_qdss_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x1058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x1178,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1178,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x1204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8350_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CB_CLK] = &gpu_cc_cb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_QDSS_AT_CLK] = &gpu_cc_cx_qdss_at_clk.clkr,
+ [GPU_CC_CX_QDSS_TRIG_CLK] = &gpu_cc_cx_qdss_trig_clk.clkr,
+ [GPU_CC_CX_QDSS_TSCTR_CLK] = &gpu_cc_cx_qdss_tsctr_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_QDSS_TSCTR_CLK] = &gpu_cc_gx_qdss_tsctr_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8350_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x1160 },
+ [GPUCC_GPU_CC_CB_BCR] = { 0x116c },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x1174 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+ [GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct gdsc *gpu_cc_sm8350_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8350_desc = {
+ .config = &gpu_cc_sm8350_regmap_config,
+ .clks = gpu_cc_sm8350_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8350_clocks),
+ .resets = gpu_cc_sm8350_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8350_resets),
+ .gdscs = gpu_cc_sm8350_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8350_gdscs),
+};
+
+static int gpu_cc_sm8350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8350_desc);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to map gpu cc registers\n");
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8350_desc, regmap);
+}
+
+static const struct of_device_id gpu_cc_sm8350_match_table[] = {
+ { .compatible = "qcom,sm8350-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8350_match_table);
+
+static struct platform_driver gpu_cc_sm8350_driver = {
+ .probe = gpu_cc_sm8350_probe,
+ .driver = {
+ .name = "sm8350-gpucc",
+ .of_match_table = gpu_cc_sm8350_match_table,
+ },
+};
+
+static int __init gpu_cc_sm8350_init(void)
+{
+ return platform_driver_register(&gpu_cc_sm8350_driver);
+}
+subsys_initcall(gpu_cc_sm8350_init);
+
+static void __exit gpu_cc_sm8350_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sm8350_driver);
+}
+module_exit(gpu_cc_sm8350_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM8350 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
index 4d4b657d33c3..cfd961d5cc45 100644
--- a/drivers/clk/qcom/krait-cc.c
+++ b/drivers/clk/qcom/krait-cc.c
@@ -139,6 +139,14 @@ krait_add_sec_mux(struct device *dev, int id, const char *s,
mux->hw.init = &init;
mux->safe_sel = 0;
+ /* Checking for qcom,krait-cc-v1 or qcom,krait-cc-v2 is not
+ * enough to limit this quirk to apq/ipq8064. Check the machine
+ * compatible directly to handle this erratum correctly.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064") ||
+ of_machine_is_compatible("qcom,apq8064"))
+ mux->disable_sec_src_gating = true;
+
init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
if (!init.name)
return -ENOMEM;
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 24843e4f2599..80330dab4d81 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -45,194 +45,14 @@ enum {
P_MMPLL4,
};
-static const struct parent_map mmss_xo_hdmi_map[] = {
- { P_XO, 0 },
- { P_HDMIPLL, 1 }
-};
-
-static const char * const mmss_xo_hdmi[] = {
- "xo",
- "hdmipll"
-};
-
-static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
- { P_XO, 0 },
- { P_DSI0PLL, 1 },
- { P_DSI1PLL, 2 }
-};
-
-static const char * const mmss_xo_dsi0pll_dsi1pll[] = {
- "xo",
- "dsi0pll",
- "dsi1pll"
-};
-
-static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_gpll0_gpll0_div[] = {
- "xo",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_dsibyte_map[] = {
- { P_XO, 0 },
- { P_DSI0PLL_BYTE, 1 },
- { P_DSI1PLL_BYTE, 2 }
-};
-
-static const char * const mmss_xo_dsibyte[] = {
- "xo",
- "dsi0pllbyte",
- "dsi1pllbyte"
-};
-
-static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL3, 3 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll3",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL5, 2 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll5",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL4, 3 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll4",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL9, 2 },
- { P_MMPLL2, 3 },
- { P_MMPLL8, 4 },
- { P_GPLL0, 5 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
- "xo",
- "mmpll0",
- "mmpll9",
- "mmpll2",
- "mmpll8",
- "gpll0"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL9, 2 },
- { P_MMPLL2, 3 },
- { P_MMPLL8, 4 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll9",
- "mmpll2",
- "mmpll8",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_MMPLL4, 3 },
- { P_MMPLL3, 4 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "mmpll4",
- "mmpll3",
- "gpll0",
- "gpll0_div"
-};
-
static struct clk_fixed_factor gpll0_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "gpll0_div",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gpll0", .name = "gpll0" },
+ },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
@@ -265,7 +85,9 @@ static struct clk_alpha_pll mmpll0_early = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmpll0_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -278,7 +100,9 @@ static struct clk_alpha_pll_postdiv mmpll0 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll0",
- .parent_names = (const char *[]){ "mmpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll0_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -295,7 +119,9 @@ static struct clk_alpha_pll mmpll1_early = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "mmpll1_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
}
@@ -308,7 +134,9 @@ static struct clk_alpha_pll_postdiv mmpll1 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll1",
- .parent_names = (const char *[]){ "mmpll1_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll1_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -322,7 +150,9 @@ static struct clk_alpha_pll mmpll2_early = {
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -334,7 +164,9 @@ static struct clk_alpha_pll_postdiv mmpll2 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2",
- .parent_names = (const char *[]){ "mmpll2_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll2_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -348,7 +180,9 @@ static struct clk_alpha_pll mmpll3_early = {
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -360,7 +194,9 @@ static struct clk_alpha_pll_postdiv mmpll3 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3",
- .parent_names = (const char *[]){ "mmpll3_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll3_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -374,7 +210,9 @@ static struct clk_alpha_pll mmpll4_early = {
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -386,7 +224,9 @@ static struct clk_alpha_pll_postdiv mmpll4 = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4",
- .parent_names = (const char *[]){ "mmpll4_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll4_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -400,7 +240,9 @@ static struct clk_alpha_pll mmpll5_early = {
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -412,7 +254,9 @@ static struct clk_alpha_pll_postdiv mmpll5 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5",
- .parent_names = (const char *[]){ "mmpll5_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll5_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -426,7 +270,9 @@ static struct clk_alpha_pll mmpll8_early = {
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -438,7 +284,9 @@ static struct clk_alpha_pll_postdiv mmpll8 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8",
- .parent_names = (const char *[]){ "mmpll8_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll8_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -452,7 +300,9 @@ static struct clk_alpha_pll mmpll9_early = {
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -464,13 +314,197 @@ static struct clk_alpha_pll_postdiv mmpll9 = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9",
- .parent_names = (const char *[]){ "mmpll9_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll9_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
+static const struct parent_map mmss_xo_hdmi_map[] = {
+ { P_XO, 0 },
+ { P_HDMIPLL, 1 }
+};
+
+static const struct clk_parent_data mmss_xo_hdmi[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "hdmipll", .name = "hdmipll" }
+};
+
+static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
+};
+
+static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" }
+};
+
+static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 }
+};
+
+static const struct clk_parent_data mmss_xo_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" }
+};
+
+static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL3, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL5, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll5.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll9.clkr.hw },
+ { .hw = &mmpll2.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll9.clkr.hw },
+ { .hw = &mmpll2.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_MMPLL4, 3 },
+ { P_MMPLL3, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
static const struct freq_tbl ftbl_ahb_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
@@ -485,8 +519,8 @@ static struct clk_rcg2 ahb_clk_src = {
.freq_tbl = ftbl_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -509,8 +543,8 @@ static struct clk_rcg2 axi_clk_src = {
.freq_tbl = ftbl_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "axi_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -522,8 +556,8 @@ static struct clk_rcg2 maxi_clk_src = {
.freq_tbl = ftbl_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "maxi_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -535,8 +569,8 @@ static struct clk_rcg2_gfx3d gfx3d_clk_src = {
.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
- .num_parents = 6,
+ .parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0),
.ops = &clk_gfx3d_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -560,8 +594,8 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.freq_tbl = ftbl_rbbmtimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -572,8 +606,8 @@ static struct clk_rcg2 isense_clk_src = {
.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "isense_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -591,8 +625,8 @@ static struct clk_rcg2 rbcpr_clk_src = {
.freq_tbl = ftbl_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -613,8 +647,8 @@ static struct clk_rcg2 video_core_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_core_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -627,8 +661,8 @@ static struct clk_rcg2 video_subcore0_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -641,8 +675,8 @@ static struct clk_rcg2 video_subcore1_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -654,8 +688,8 @@ static struct clk_rcg2 pclk0_clk_src = {
.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
- .parent_names = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -668,8 +702,8 @@ static struct clk_rcg2 pclk1_clk_src = {
.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
- .parent_names = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -695,8 +729,8 @@ static struct clk_rcg2 mdp_clk_src = {
.freq_tbl = ftbl_mdp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -713,8 +747,8 @@ static struct clk_rcg2 extpclk_clk_src = {
.freq_tbl = extpclk_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "extpclk_clk_src",
- .parent_names = mmss_xo_hdmi,
- .num_parents = 2,
+ .parent_data = mmss_xo_hdmi,
+ .num_parents = ARRAY_SIZE(mmss_xo_hdmi),
.ops = &clk_byte_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -732,8 +766,8 @@ static struct clk_rcg2 vsync_clk_src = {
.freq_tbl = ftbl_mdss_vsync_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
- .parent_names = mmss_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -750,8 +784,8 @@ static struct clk_rcg2 hdmi_clk_src = {
.freq_tbl = ftbl_mdss_hdmi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_clk_src",
- .parent_names = mmss_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -762,8 +796,8 @@ static struct clk_rcg2 byte0_clk_src = {
.parent_map = mmss_xo_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -775,8 +809,8 @@ static struct clk_rcg2 byte1_clk_src = {
.parent_map = mmss_xo_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -794,8 +828,8 @@ static struct clk_rcg2 esc0_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -807,8 +841,8 @@ static struct clk_rcg2 esc1_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -831,8 +865,8 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.freq_tbl = ftbl_camss_gp0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -845,8 +879,8 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.freq_tbl = ftbl_camss_gp0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -873,8 +907,8 @@ static struct clk_rcg2 mclk0_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -887,8 +921,8 @@ static struct clk_rcg2 mclk1_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -901,8 +935,8 @@ static struct clk_rcg2 mclk2_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -915,8 +949,8 @@ static struct clk_rcg2 mclk3_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -937,8 +971,8 @@ static struct clk_rcg2 cci_clk_src = {
.freq_tbl = ftbl_cci_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -957,8 +991,8 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -970,8 +1004,8 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -983,8 +1017,8 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1004,8 +1038,8 @@ static struct clk_rcg2 csiphy0_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy0_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1017,8 +1051,8 @@ static struct clk_rcg2 csiphy1_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy1_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1030,8 +1064,8 @@ static struct clk_rcg2 csiphy2_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy2_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1053,8 +1087,8 @@ static struct clk_rcg2 jpeg0_clk_src = {
.freq_tbl = ftbl_jpeg0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1075,8 +1109,8 @@ static struct clk_rcg2 jpeg2_clk_src = {
.freq_tbl = ftbl_jpeg2_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1088,8 +1122,8 @@ static struct clk_rcg2 jpeg_dma_clk_src = {
.freq_tbl = ftbl_jpeg0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg_dma_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1111,8 +1145,8 @@ static struct clk_rcg2 vfe0_clk_src = {
.freq_tbl = ftbl_vfe0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1124,8 +1158,8 @@ static struct clk_rcg2 vfe1_clk_src = {
.freq_tbl = ftbl_vfe0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1146,8 +1180,8 @@ static struct clk_rcg2 cpp_clk_src = {
.freq_tbl = ftbl_cpp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1168,8 +1202,8 @@ static struct clk_rcg2 csi0_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1181,8 +1215,8 @@ static struct clk_rcg2 csi1_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1194,8 +1228,8 @@ static struct clk_rcg2 csi2_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1207,8 +1241,8 @@ static struct clk_rcg2 csi3_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1227,8 +1261,8 @@ static struct clk_rcg2 fd_core_clk_src = {
.freq_tbl = ftbl_fd_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "fd_core_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1240,7 +1274,9 @@ static struct clk_branch mmss_mmagic_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1255,7 +1291,9 @@ static struct clk_branch mmss_mmagic_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_cfg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1270,7 +1308,9 @@ static struct clk_branch mmss_misc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_misc_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1285,7 +1325,9 @@ static struct clk_branch mmss_misc_cxo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_misc_cxo_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -1299,7 +1341,9 @@ static struct clk_branch mmss_mmagic_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1314,7 +1358,9 @@ static struct clk_branch mmagic_camss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_camss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1329,7 +1375,9 @@ static struct clk_branch mmagic_camss_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_camss_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1344,7 +1392,9 @@ static struct clk_branch smmu_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_vfe_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1359,7 +1409,9 @@ static struct clk_branch smmu_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_vfe_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1374,7 +1426,9 @@ static struct clk_branch smmu_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_cpp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1389,7 +1443,9 @@ static struct clk_branch smmu_cpp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_cpp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1404,7 +1460,9 @@ static struct clk_branch smmu_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_jpeg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1419,7 +1477,9 @@ static struct clk_branch smmu_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_jpeg_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1434,7 +1494,9 @@ static struct clk_branch mmagic_mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_mdss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1449,7 +1511,9 @@ static struct clk_branch mmagic_mdss_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_mdss_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1464,7 +1528,9 @@ static struct clk_branch smmu_rot_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_rot_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1479,7 +1545,9 @@ static struct clk_branch smmu_rot_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_rot_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1494,7 +1562,9 @@ static struct clk_branch smmu_mdp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_mdp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1509,7 +1579,9 @@ static struct clk_branch smmu_mdp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_mdp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1524,7 +1596,9 @@ static struct clk_branch mmagic_video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1539,7 +1613,9 @@ static struct clk_branch mmagic_video_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_video_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1554,7 +1630,9 @@ static struct clk_branch smmu_video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_video_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1569,7 +1647,9 @@ static struct clk_branch smmu_video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1584,7 +1664,9 @@ static struct clk_branch mmagic_bimc_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_bimc_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1599,7 +1681,9 @@ static struct clk_branch gpu_gx_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_gx_gfx3d_clk",
- .parent_names = (const char *[]){ "gfx3d_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.rcg.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1614,7 +1698,9 @@ static struct clk_branch gpu_gx_rbbmtimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_gx_rbbmtimer_clk",
- .parent_names = (const char *[]){ "rbbmtimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rbbmtimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1629,7 +1715,9 @@ static struct clk_branch gpu_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1644,7 +1732,9 @@ static struct clk_branch gpu_aon_isense_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_aon_isense_clk",
- .parent_names = (const char *[]){ "isense_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &isense_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1659,7 +1749,9 @@ static struct clk_branch vmem_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vmem_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1674,7 +1766,9 @@ static struct clk_branch vmem_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vmem_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1689,7 +1783,9 @@ static struct clk_branch mmss_rbcpr_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_clk",
- .parent_names = (const char *[]){ "rbcpr_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rbcpr_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1704,7 +1800,9 @@ static struct clk_branch mmss_rbcpr_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1719,7 +1817,9 @@ static struct clk_branch video_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_core_clk",
- .parent_names = (const char *[]){ "video_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1734,7 +1834,9 @@ static struct clk_branch video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1749,7 +1851,9 @@ static struct clk_branch video_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1764,7 +1868,9 @@ static struct clk_branch video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1779,7 +1885,9 @@ static struct clk_branch video_subcore0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk",
- .parent_names = (const char *[]){ "video_subcore0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_subcore0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1794,7 +1902,9 @@ static struct clk_branch video_subcore1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk",
- .parent_names = (const char *[]){ "video_subcore1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_subcore1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1809,7 +1919,9 @@ static struct clk_branch mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1824,7 +1936,9 @@ static struct clk_branch mdss_hdmi_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1839,7 +1953,9 @@ static struct clk_branch mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1854,7 +1970,9 @@ static struct clk_branch mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk0_clk",
- .parent_names = (const char *[]){ "pclk0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1869,7 +1987,9 @@ static struct clk_branch mdss_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk1_clk",
- .parent_names = (const char *[]){ "pclk1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1884,7 +2004,9 @@ static struct clk_branch mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_mdp_clk",
- .parent_names = (const char *[]){ "mdp_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1899,7 +2021,9 @@ static struct clk_branch mdss_extpclk_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_extpclk_clk",
- .parent_names = (const char *[]){ "extpclk_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &extpclk_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1914,7 +2038,9 @@ static struct clk_branch mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_vsync_clk",
- .parent_names = (const char *[]){ "vsync_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1929,7 +2055,9 @@ static struct clk_branch mdss_hdmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_clk",
- .parent_names = (const char *[]){ "hdmi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &hdmi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1944,7 +2072,9 @@ static struct clk_branch mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte0_clk",
- .parent_names = (const char *[]){ "byte0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1959,7 +2089,9 @@ static struct clk_branch mdss_byte1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte1_clk",
- .parent_names = (const char *[]){ "byte1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1974,7 +2106,9 @@ static struct clk_branch mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc0_clk",
- .parent_names = (const char *[]){ "esc0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1989,7 +2123,9 @@ static struct clk_branch mdss_esc1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc1_clk",
- .parent_names = (const char *[]){ "esc1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2004,7 +2140,9 @@ static struct clk_branch camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_top_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2019,7 +2157,9 @@ static struct clk_branch camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2034,7 +2174,9 @@ static struct clk_branch camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_micro_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2049,7 +2191,9 @@ static struct clk_branch camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk",
- .parent_names = (const char *[]){ "camss_gp0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2064,7 +2208,9 @@ static struct clk_branch camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk",
- .parent_names = (const char *[]){ "camss_gp1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2079,7 +2225,9 @@ static struct clk_branch camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk0_clk",
- .parent_names = (const char *[]){ "mclk0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2094,7 +2242,9 @@ static struct clk_branch camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk1_clk",
- .parent_names = (const char *[]){ "mclk1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2109,7 +2259,9 @@ static struct clk_branch camss_mclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk2_clk",
- .parent_names = (const char *[]){ "mclk2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2124,7 +2276,9 @@ static struct clk_branch camss_mclk3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk3_clk",
- .parent_names = (const char *[]){ "mclk3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2139,7 +2293,9 @@ static struct clk_branch camss_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_clk",
- .parent_names = (const char *[]){ "cci_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2154,7 +2310,9 @@ static struct clk_branch camss_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2169,7 +2327,9 @@ static struct clk_branch camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0phytimer_clk",
- .parent_names = (const char *[]){ "csi0phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2184,7 +2344,9 @@ static struct clk_branch camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1phytimer_clk",
- .parent_names = (const char *[]){ "csi1phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2199,7 +2361,9 @@ static struct clk_branch camss_csi2phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2phytimer_clk",
- .parent_names = (const char *[]){ "csi2phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2214,7 +2378,9 @@ static struct clk_branch camss_csiphy0_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy0_3p_clk",
- .parent_names = (const char *[]){ "csiphy0_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy0_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2229,7 +2395,9 @@ static struct clk_branch camss_csiphy1_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy1_3p_clk",
- .parent_names = (const char *[]){ "csiphy1_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy1_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2244,7 +2412,9 @@ static struct clk_branch camss_csiphy2_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy2_3p_clk",
- .parent_names = (const char *[]){ "csiphy2_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy2_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2259,7 +2429,9 @@ static struct clk_branch camss_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg0_clk",
- .parent_names = (const char *[]){ "jpeg0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2274,7 +2446,9 @@ static struct clk_branch camss_jpeg2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg2_clk",
- .parent_names = (const char *[]){ "jpeg2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2289,7 +2463,9 @@ static struct clk_branch camss_jpeg_dma_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_dma_clk",
- .parent_names = (const char *[]){ "jpeg_dma_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg_dma_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2304,7 +2480,9 @@ static struct clk_branch camss_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2319,7 +2497,9 @@ static struct clk_branch camss_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2334,7 +2514,9 @@ static struct clk_branch camss_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2349,7 +2531,9 @@ static struct clk_branch camss_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2364,7 +2548,9 @@ static struct clk_branch camss_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2379,7 +2565,9 @@ static struct clk_branch camss_vfe0_stream_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_stream_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2394,7 +2582,9 @@ static struct clk_branch camss_vfe0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2409,7 +2599,9 @@ static struct clk_branch camss_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2424,7 +2616,9 @@ static struct clk_branch camss_vfe1_stream_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_stream_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2439,7 +2633,9 @@ static struct clk_branch camss_vfe1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2454,7 +2650,9 @@ static struct clk_branch camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe0_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2469,7 +2667,9 @@ static struct clk_branch camss_csi_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe1_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2484,7 +2684,9 @@ static struct clk_branch camss_cpp_vbif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_vbif_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2499,7 +2701,9 @@ static struct clk_branch camss_cpp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2514,7 +2718,9 @@ static struct clk_branch camss_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_clk",
- .parent_names = (const char *[]){ "cpp_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2529,7 +2735,9 @@ static struct clk_branch camss_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2544,7 +2752,9 @@ static struct clk_branch camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2559,7 +2769,9 @@ static struct clk_branch camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2574,7 +2786,9 @@ static struct clk_branch camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0phy_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2589,7 +2803,9 @@ static struct clk_branch camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0rdi_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2604,7 +2820,9 @@ static struct clk_branch camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0pix_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2619,7 +2837,9 @@ static struct clk_branch camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2634,7 +2854,9 @@ static struct clk_branch camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2649,7 +2871,9 @@ static struct clk_branch camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1phy_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2664,7 +2888,9 @@ static struct clk_branch camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1rdi_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2679,7 +2905,9 @@ static struct clk_branch camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1pix_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2694,7 +2922,9 @@ static struct clk_branch camss_csi2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2709,7 +2939,9 @@ static struct clk_branch camss_csi2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2724,7 +2956,9 @@ static struct clk_branch camss_csi2phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2phy_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2739,7 +2973,9 @@ static struct clk_branch camss_csi2rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2rdi_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2754,7 +2990,9 @@ static struct clk_branch camss_csi2pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2pix_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2769,7 +3007,9 @@ static struct clk_branch camss_csi3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2784,7 +3024,9 @@ static struct clk_branch camss_csi3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2799,7 +3041,9 @@ static struct clk_branch camss_csi3phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3phy_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2814,7 +3058,9 @@ static struct clk_branch camss_csi3rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3rdi_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2829,7 +3075,9 @@ static struct clk_branch camss_csi3pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3pix_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2844,7 +3092,9 @@ static struct clk_branch camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ispif_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2859,7 +3109,9 @@ static struct clk_branch fd_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_core_clk",
- .parent_names = (const char *[]){ "fd_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &fd_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2874,7 +3126,9 @@ static struct clk_branch fd_core_uar_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_core_uar_clk",
- .parent_names = (const char *[]){ "fd_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &fd_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2889,7 +3143,9 @@ static struct clk_branch fd_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
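
All of the mmcc-msm8996 hunks above apply the same two clk-framework conversions: mux parents that were global name strings (.parent_names plus a hand-counted .num_parents) become struct clk_parent_data tables sized with ARRAY_SIZE(), and parents registered by this same driver are referenced directly through .parent_hws, skipping the global string lookup entirely. A minimal sketch of both forms, using hypothetical example_* identifiers rather than anything from this file (the rcg/branch ops come from qcom's private clk-rcg.h/clk-branch.h headers):

	#include <linux/clk-provider.h>

	/* Firmware-provided parent: .fw_name matches the DT "clock-names"
	 * entry, .name is the legacy global-namespace fallback. */
	static const struct clk_parent_data example_parents[] = {
		{ .fw_name = "xo", .name = "xo_board" },
	};

	static const struct clk_init_data example_src_init = {
		.name = "example_clk_src",
		.parent_data = example_parents,
		/* ARRAY_SIZE() cannot drift out of sync with the table */
		.num_parents = ARRAY_SIZE(example_parents),
		.ops = &clk_rcg2_ops,
	};

	/* Parent owned by the same driver: point straight at its clk_hw. */
	static const struct clk_init_data example_branch_init = {
		.name = "example_branch_clk",
		.parent_hws = (const struct clk_hw*[]){
			&example_src.clkr.hw,	/* hypothetical RCG above */
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_branch2_ops,
	};

Either way the framework resolves the parents itself at registration time; the ARRAY_SIZE() change alone removes the latent risk of num_parents disagreeing with the array it counts.
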
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 8617454e4a77..f28f2cb051d7 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -277,7 +277,6 @@ static struct gdsc mvs0c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs1c_gdsc = {
@@ -287,7 +286,6 @@ static struct gdsc mvs1c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs0_gdsc = {
@@ -297,7 +295,6 @@ static struct gdsc mvs0_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs1_gdsc = {
@@ -307,7 +304,6 @@ static struct gdsc mvs1_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct clk_regmap *video_cc_sm8250_clocks[] = {
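
The four videocc-sm8250 hunks drop the per-GDSC "mmcx" supply string. Upstream this belongs to a series that keeps the MMCX power domain up through runtime PM on the clock-controller device instead of a regulator handle in every GDSC; the probe-side wiring is outside this excerpt, so the following is only a hedged sketch of that general pattern, not the literal replacement code:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_cc_probe(struct platform_device *pdev)
	{
		int ret;

		/* Assumed pattern: hold the device's attached power domain
		 * (MMCX) up across registration via runtime PM, replacing
		 * the per-GDSC .supply regulator. */
		ret = devm_pm_runtime_enable(&pdev->dev);
		if (ret)
			return ret;

		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret)
			return ret;

		/* ... register PLLs, clocks and GDSCs here ... */

		pm_runtime_put(&pdev->dev);
		return 0;
	}
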
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index cfed11c659d9..f45c2c45808b 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -18,7 +18,6 @@
struct r8a73a4_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_CKSCR 0xc0
@@ -59,7 +58,7 @@ static const struct clk_div_table div4_div_table[] = {
static struct clk * __init
r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
const char *parent_name;
@@ -69,7 +68,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
if (!strcmp(name, "main")) {
- u32 ckscr = readl(cpg->reg + CPG_CKSCR);
+ u32 ckscr = readl(base + CPG_CKSCR);
switch ((ckscr >> 28) & 3) {
case 0: /* extal1 */
@@ -93,14 +92,14 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = readl(cpg->reg + CPG_PLL0CR);
+ u32 value = readl(base + CPG_PLL0CR);
parent_name = "main";
mult = ((value >> 24) & 0x7f) + 1;
if (value & BIT(20))
div = 2;
} else if (!strcmp(name, "pll1")) {
- u32 value = readl(cpg->reg + CPG_PLL1CR);
+ u32 value = readl(base + CPG_PLL1CR);
parent_name = "main";
/* XXX: enable bit? */
@@ -123,7 +122,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- value = readl(cpg->reg + cr);
+ value = readl(base + cr);
switch ((value >> 5) & 7) {
case 0:
parent_name = "main";
@@ -159,7 +158,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
shift = 0;
}
div *= 32;
- mult = 0x20 - ((readl(cpg->reg + CPG_FRQCRC) >> shift) & 0x1f);
+ mult = 0x20 - ((readl(base + CPG_FRQCRC) >> shift) & 0x1f);
} else {
struct div4_clk *c;
@@ -181,7 +180,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, 4, 0,
+ base + reg, shift, 4, 0,
table, &cpg->lock);
}
}
@@ -189,6 +188,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
{
struct r8a73a4_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -213,8 +213,8 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
for (i = 0; i < num_clks; ++i) {
@@ -224,7 +224,7 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a73a4_cpg_register_clock(np, cpg, name);
+ clk = r8a73a4_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
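
This r8a73a4 conversion (and the near-identical r8a7740 one below) deletes the reg pointer from the per-SoC CPG structure: the register base is only ever read while the clocks are being registered, so it is now mapped locally in the init function and threaded through as a parameter. Note that clk_register_divider_table() captures base + reg inside the divider it creates, which is why nothing needs the stored copy afterwards. The shape of the refactor, with hypothetical example_* names:

	#include <linux/io.h>
	#include <linux/of_address.h>

	static struct clk * __init
	example_register_clock(struct device_node *np, void __iomem *base,
			       const char *name);

	static void __init example_cpg_clocks_init(struct device_node *np)
	{
		/* Map once; the base only lives for this init path. */
		void __iomem *base = of_iomap(np, 0);

		if (WARN_ON(base == NULL))
			return;

		example_register_clock(np, base, "main");
	}
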
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index d8190f007a81..3ee3f57e4e9a 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -18,7 +18,6 @@
struct r8a7740_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_FRQCRA 0x00
@@ -61,7 +60,7 @@ static u32 cpg_mode __initdata;
static struct clk * __init
r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
const char *parent_name;
@@ -96,20 +95,20 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = readl(cpg->reg + CPG_FRQCRC);
+ u32 value = readl(base + CPG_FRQCRC);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
} else if (!strcmp(name, "pllc1")) {
- u32 value = readl(cpg->reg + CPG_FRQCRA);
+ u32 value = readl(base + CPG_FRQCRA);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
div = 2;
} else if (!strcmp(name, "pllc2")) {
- u32 value = readl(cpg->reg + CPG_PLLC2CR);
+ u32 value = readl(base + CPG_PLLC2CR);
parent_name = "system";
mult = ((value >> 24) & 0x3f) + 1;
} else if (!strcmp(name, "usb24s")) {
- u32 value = readl(cpg->reg + CPG_USBCKCR);
+ u32 value = readl(base + CPG_USBCKCR);
if (value & BIT(7))
/* extal2 */
parent_name = of_clk_get_parent_name(np, 1);
@@ -137,7 +136,7 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, 4, 0,
+ base + reg, shift, 4, 0,
table, &cpg->lock);
}
}
@@ -145,6 +144,7 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
static void __init r8a7740_cpg_clocks_init(struct device_node *np)
{
struct r8a7740_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -172,8 +172,8 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
for (i = 0; i < num_clks; ++i) {
@@ -183,7 +183,7 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7740_cpg_register_clock(np, cpg, name);
+ clk = r8a7740_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 3ccc53685bdd..797556259370 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -11,12 +11,6 @@
#include <linux/slab.h>
#include <linux/soc/renesas/rcar-rst.h>
-struct r8a7778_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
/* PLL multipliers per bits 11, 12, and 18 of MODEMR */
static const struct {
unsigned long plla_mult;
@@ -47,8 +41,7 @@ static u32 cpg_mode_rates __initdata;
static u32 cpg_mode_divs __initdata;
static struct clk * __init
-r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
- const char *name)
+r8a7778_cpg_register_clock(struct device_node *np, const char *name)
{
if (!strcmp(name, "plla")) {
return clk_register_fixed_factor(NULL, "plla",
@@ -77,7 +70,7 @@ r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
static void __init r8a7778_cpg_clocks_init(struct device_node *np)
{
- struct r8a7778_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -100,23 +93,17 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
return;
}
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
+ if (data == NULL || clks == NULL) {
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
return;
}
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
-
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
- return;
+ data->clks = clks;
+ data->clk_num = num_clks;
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -125,15 +112,15 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7778_cpg_register_clock(np, cpg, name);
+ clk = r8a7778_cpg_register_clock(np, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index 9f3b5522eef5..9a2fea8cf4d7 100644
--- a/drivers/clk/renesas/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -21,12 +21,6 @@
#define CPG_NUM_CLOCKS (R8A7779_CLK_OUT + 1)
-struct r8a7779_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
/* -----------------------------------------------------------------------------
* CPG Clock Data
*/
@@ -87,7 +81,7 @@ static const unsigned int cpg_plla_mult[4] __initconst = { 42, 48, 56, 64 };
*/
static struct clk * __init
-r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
+r8a7779_cpg_register_clock(struct device_node *np,
const struct cpg_clk_config *config,
unsigned int plla_mult, const char *name)
{
@@ -119,7 +113,7 @@ r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
static void __init r8a7779_cpg_clocks_init(struct device_node *np)
{
const struct cpg_clk_config *config;
- struct r8a7779_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
unsigned int i, plla_mult;
int num_clks;
@@ -134,19 +128,17 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
return;
}
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(CPG_NUM_CLOCKS, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
+ if (data == NULL || clks == NULL) {
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
return;
}
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
+ data->clks = clks;
+ data->clk_num = num_clks;
config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(mode)];
plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(mode)];
@@ -158,16 +150,15 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7779_cpg_register_clock(np, cpg, config,
- plla_mult, name);
+ clk = r8a7779_cpg_register_clock(np, config, plla_mult, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 7b703f14e20b..e770f09a27ed 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -15,11 +15,6 @@
#include <linux/of_address.h>
#include <linux/slab.h>
-struct rz_cpg {
- struct clk_onecell_data data;
- void __iomem *reg;
-};
-
#define CPG_FRQCR 0x10
#define CPG_FRQCR2 0x14
@@ -49,7 +44,8 @@ static u16 __init rz_cpg_read_mode_pins(void)
}
static struct clk * __init
-rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name)
+rz_cpg_register_clock(struct device_node *np, void __iomem *base,
+ const char *name)
{
u32 val;
unsigned mult;
@@ -65,7 +61,7 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
}
/* If mapping regs failed, skip non-pll clocks. System will boot anyhow */
- if (!cpg->reg)
+ if (!base)
return ERR_PTR(-ENXIO);
/* FIXME:"i" and "g" are variable clocks with non-integer dividers (e.g. 2/3)
@@ -73,9 +69,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
* let them run at fixed current speed and implement the details later.
*/
if (strcmp(name, "i") == 0)
- val = (readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
+ val = (readl(base + CPG_FRQCR) >> 8) & 3;
else if (strcmp(name, "g") == 0)
- val = readl(cpg->reg + CPG_FRQCR2) & 3;
+ val = readl(base + CPG_FRQCR2) & 3;
else
return ERR_PTR(-EINVAL);
@@ -85,8 +81,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
static void __init rz_cpg_clocks_init(struct device_node *np)
{
- struct rz_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
+ void __iomem *base;
unsigned i;
int num_clks;
@@ -94,14 +91,14 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
if (WARN(num_clks <= 0, "can't count CPG clocks\n"))
return;
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- BUG_ON(!cpg || !clks);
+ BUG_ON(!data || !clks);
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
+ data->clks = clks;
+ data->clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
+ base = of_iomap(np, 0);
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -109,15 +106,15 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i, &name);
- clk = rz_cpg_register_clock(np, cpg, name);
+ clk = rz_cpg_register_clock(np, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 4146c1d717b9..8c51090f13e1 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -18,7 +18,6 @@
struct sh73a0_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_FRQCRA 0x00
@@ -73,7 +72,7 @@ static const struct clk_div_table z_div_table[] = {
static struct clk * __init
sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
unsigned int shift, reg, width;
@@ -83,12 +82,12 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
if (!strcmp(name, "main")) {
/* extal1, extal1_div2, extal2, extal2_div2 */
- u32 parent_idx = (readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
+ u32 parent_idx = (readl(base + CPG_CKSCR) >> 28) & 3;
parent_name = of_clk_get_parent_name(np, parent_idx >> 1);
div = (parent_idx & 1) + 1;
} else if (!strncmp(name, "pll", 3)) {
- void __iomem *enable_reg = cpg->reg;
+ void __iomem *enable_reg = base;
u32 enable_bit = name[3] - '0';
parent_name = "main";
@@ -108,7 +107,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- if (readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
+ if (readl(base + CPG_PLLECR) & BIT(enable_bit)) {
mult = ((readl(enable_reg) >> 24) & 0x3f) + 1;
/* handle CFG bit for PLL1 and PLL2 */
if (enable_bit == 1 || enable_bit == 2)
@@ -117,7 +116,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
}
} else if (!strcmp(name, "dsi0phy") || !strcmp(name, "dsi1phy")) {
u32 phy_no = name[3] - '0';
- void __iomem *dsi_reg = cpg->reg +
+ void __iomem *dsi_reg = base +
(phy_no ? CPG_DSI1PHYCR : CPG_DSI0PHYCR);
parent_name = phy_no ? "dsi1pck" : "dsi0pck";
@@ -154,7 +153,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, width, 0,
+ base + reg, shift, width, 0,
table, &cpg->lock);
}
}
@@ -162,6 +161,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
static void __init sh73a0_cpg_clocks_init(struct device_node *np)
{
struct sh73a0_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -186,14 +186,14 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
/* Set SDHI clocks to a known state */
- writel(0x108, cpg->reg + CPG_SD0CKCR);
- writel(0x108, cpg->reg + CPG_SD1CKCR);
- writel(0x108, cpg->reg + CPG_SD2CKCR);
+ writel(0x108, base + CPG_SD0CKCR);
+ writel(0x108, base + CPG_SD1CKCR);
+ writel(0x108, base + CPG_SD2CKCR);
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -202,7 +202,7 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = sh73a0_cpg_register_clock(np, cpg, name);
+ clk = sh73a0_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index c17ebe6b5992..cd80b6084ece 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -77,6 +77,8 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
/* Core Clock Outputs */
+ DEF_GEN4_Z("z0", R8A779F0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL2, 2, 0),
+ DEF_GEN4_Z("z1", R8A779F0_CLK_Z1, CLK_TYPE_GEN4_Z, CLK_PLL2, 2, 8),
DEF_FIXED("s0d2", R8A779F0_CLK_S0D2, CLK_S0, 2, 1),
DEF_FIXED("s0d3", R8A779F0_CLK_S0D3, CLK_S0, 3, 1),
DEF_FIXED("s0d4", R8A779F0_CLK_S0D4, CLK_S0, 4, 1),
@@ -118,20 +120,28 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ DEF_MOD("hscif0", 514, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif1", 515, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif2", 516, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif3", 517, R8A779F0_CLK_S0D3),
DEF_MOD("i2c0", 518, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c1", 519, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c2", 520, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c3", 521, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c4", 522, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c5", 523, R8A779F0_CLK_S0D6_PER),
+ DEF_MOD("pcie0", 624, R8A779F0_CLK_S0D2),
+ DEF_MOD("pcie1", 625, R8A779F0_CLK_S0D2),
DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif1", 703, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif3", 704, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif4", 705, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("sdhi0", 706, R8A779F0_CLK_SD0),
DEF_MOD("sys-dmac0", 709, R8A779F0_CLK_S0D3_PER),
DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER),
DEF_MOD("wdt", 907, R8A779F0_CLK_R),
DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
+ DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC),
};
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 35ffc462af1a..1488c9d6e639 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -51,11 +51,9 @@ struct r9a06g032_clkdesc {
struct {
u16 div, mul;
};
- unsigned int factor;
- unsigned int frequency;
/* for dual gate */
struct {
- uint16_t group : 1, index: 3;
+ uint16_t group : 1;
u16 sel, g1, r1, g2, r2;
} dual;
};
@@ -85,10 +83,10 @@ struct r9a06g032_clkdesc {
.source = 1 + R9A06G032_##_src, .name = _n, \
.reg = _reg, .div_min = _min, .div_max = _max, \
.div_table = { __VA_ARGS__ } }
-#define D_UGATE(_idx, _n, _src, _g, _gi, _g1, _r1, _g2, _r2) \
+#define D_UGATE(_idx, _n, _src, _g, _g1, _r1, _g2, _r2) \
{ .type = K_DUALGATE, .index = R9A06G032_##_idx, \
.source = 1 + R9A06G032_##_src, .name = _n, \
- .dual = { .group = _g, .index = _gi, \
+ .dual = { .group = _g, \
.g1 = _g1, .r1 = _r1, .g2 = _g2, .r2 = _r2 }, }
enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE };
@@ -290,8 +288,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_012",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_UART,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
- .dual.sel = ((0xec / 4) << 5) | 24,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
.dual.group = 0,
},
{
@@ -299,18 +297,18 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_34567",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_P2_PG,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
- .dual.sel = ((0x34 / 4) << 5) | 30,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
.dual.group = 1,
},
- D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
- D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 1, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
- D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 2, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
- D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0, 0x760, 0x761, 0x762, 0x763),
- D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 1, 0x764, 0x765, 0x766, 0x767),
- D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 2, 0x768, 0x769, 0x76a, 0x76b),
- D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 3, 0x76c, 0x76d, 0x76e, 0x76f),
- D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 4, 0x770, 0x771, 0x772, 0x773),
+ D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
+ D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
+ D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
+ D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0x760, 0x761, 0x762, 0x763),
+ D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 0x764, 0x765, 0x766, 0x767),
+ D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 0x768, 0x769, 0x76a, 0x76b),
+ D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 0x76c, 0x76d, 0x76e, 0x76f),
+ D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 0x770, 0x771, 0x772, 0x773),
};
struct r9a06g032_priv {
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 33c2bd8df2e5..37475465100d 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -36,9 +36,11 @@ enum clk_ids {
CLK_PLL3_DIV2_4_2,
CLK_SEL_PLL3_3,
CLK_DIV_PLL3_C,
+#ifdef CONFIG_ARM64
CLK_PLL5,
CLK_PLL5_500,
CLK_PLL5_250,
+#endif
CLK_PLL6,
CLK_PLL6_250,
CLK_P1_DIV2,
@@ -100,9 +102,11 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
DEF_MUX_RO(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, sel_pll3_3),
DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, DIVPL3C, dtable_1_32),
+#ifdef CONFIG_ARM64
DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6),
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2),
+#endif
DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
@@ -126,12 +130,20 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
};
static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
0x514, 0),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
0x518, 0),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
0x518, 1),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
+ 0x518, 0),
+ DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
+ 0x518, 1),
+#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
0x52c, 0),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
@@ -243,9 +255,14 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
};
static struct rzg2l_reset r9a07g043_resets[] = {
+#ifdef CONFIG_ARM64
DEF_RST(R9A07G043_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A07G043_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A07G043_IA55_RESETN, 0x818, 0),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_RST(R9A07G043_IAX45_RESETN, 0x818, 0),
+#endif
DEF_RST(R9A07G043_DMAC_ARESETN, 0x82c, 0),
DEF_RST(R9A07G043_DMAC_RST_ASYNC, 0x82c, 1),
DEF_RST(R9A07G043_OSTM0_PRESETZ, 0x834, 0),
@@ -291,8 +308,13 @@ static struct rzg2l_reset r9a07g043_resets[] = {
};
static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+#ifdef CONFIG_ARM64
MOD_CLK_BASE + R9A07G043_GIC600_GICCLK,
MOD_CLK_BASE + R9A07G043_IA55_CLK,
+#endif
+#ifdef CONFIG_RISCV
+ MOD_CLK_BASE + R9A07G043_IAX45_CLK,
+#endif
MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
};
@@ -310,11 +332,21 @@ const struct rzg2l_cpg_info r9a07g043_cpg_info = {
/* Module Clocks */
.mod_clks = r9a07g043_mod_clks,
.num_mod_clks = ARRAY_SIZE(r9a07g043_mod_clks),
+#ifdef CONFIG_ARM64
.num_hw_mod_clks = R9A07G043_TSU_PCLK + 1,
+#endif
+#ifdef CONFIG_RISCV
+ .num_hw_mod_clks = R9A07G043_IAX45_PCLK + 1,
+#endif
/* Resets */
.resets = r9a07g043_resets,
+#ifdef CONFIG_ARM64
.num_resets = R9A07G043_TSU_PRESETN + 1, /* Last reset ID + 1 */
+#endif
+#ifdef CONFIG_RISCV
+ .num_resets = R9A07G043_IAX45_RESETN + 1, /* Last reset ID + 1 */
+#endif
.has_clk_mon_regs = true,
};
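[Editor's note] The r9a07g043 CPG now serves two different SoCs from one source file: RZ/G2UL (Cortex-A55, CONFIG_ARM64) and RZ/Five (RISC-V, CONFIG_RISCV). A kernel is built for exactly one of the two architectures, so the arch-specific table entries and the duplicated `.num_hw_mod_clks`/`.num_resets` initializers are mutually exclusive at compile time. A stripped-down sketch of the technique, hypothetical IDs:

#include <linux/init.h>

struct mod_clk { const char *name; unsigned int id; };

static const struct mod_clk mod_clks[] __initconst = {
#ifdef CONFIG_ARM64
	{ "gic",   1 },	/* GIC-600 clock: Cortex-A55 part only */
#endif
#ifdef CONFIG_RISCV
	{ "iax45", 2 },	/* RISC-V interrupt unit: RZ/Five only */
#endif
	{ "dmac",  3 },	/* shared by both SoC variants */
};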
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index b288897852c7..fd7c4eecd398 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -182,7 +182,7 @@ static const struct {
};
static const struct {
- struct rzg2l_mod_clk common[71];
+ struct rzg2l_mod_clk common[76];
#ifdef CONFIG_CLK_R9A07G054
struct rzg2l_mod_clk drp[0];
#endif
@@ -204,6 +204,16 @@ static const struct {
0x534, 1),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
0x534, 2),
+ DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
+ 0x540, 0),
+ DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
+ 0x544, 0),
+ DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
+ 0x544, 1),
+ DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
+ 0x544, 2),
+ DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
+ 0x544, 3),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
0x548, 0),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
@@ -346,6 +356,11 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_OSTM0_PRESETZ, 0x834, 0),
DEF_RST(R9A07G044_OSTM1_PRESETZ, 0x834, 1),
DEF_RST(R9A07G044_OSTM2_PRESETZ, 0x834, 2),
+ DEF_RST(R9A07G044_GPT_RST_C, 0x840, 0),
+ DEF_RST(R9A07G044_POEG_A_RST, 0x844, 0),
+ DEF_RST(R9A07G044_POEG_B_RST, 0x844, 1),
+ DEF_RST(R9A07G044_POEG_C_RST, 0x844, 2),
+ DEF_RST(R9A07G044_POEG_D_RST, 0x844, 3),
DEF_RST(R9A07G044_WDT0_PRESETN, 0x848, 0),
DEF_RST(R9A07G044_WDT1_PRESETN, 0x848, 1),
DEF_RST(R9A07G044_WDT2_PRESETN, 0x848, 2),
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index 40693bb85b80..b21915cf6648 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -126,19 +126,24 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
+ DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2),
DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
+ DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
+ DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
};
static const struct rzg2l_reset r9a09g011_resets[] = {
+ DEF_RST(R9A09G011_PFC_PRESETN, 0x600, 2),
DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11),
DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13),
+ DEF_RST_MON(R9A09G011_WDT0_PRESETN, 0x614, 12, 19),
};
static const unsigned int r9a09g011_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index c7ed43d6aa67..e27832e5114f 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -23,7 +23,7 @@
#include "rcar-gen4-cpg.h"
#include "rcar-cpg-lib.h"
-static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initconst;
+static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
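[Editor's note] The rcar-gen4-cpg change is a section-annotation fix, not a behavioural one: `cpg_pll_config` is a pointer that gets assigned at init time, so it must live in writable init memory (`__initdata`); `__initconst` is only correct for objects that are themselves read-only. A sketch of the distinction:

#include <linux/init.h>

struct pll_config { unsigned int mult, div; };

/* The tables never change after build: read-only init memory. */
static const struct pll_config pll_configs[2] __initconst = {
	{ 64, 1 },
	{ 128, 2 },
};

/* The pointer is written at init time, so it needs __initdata;
 * __initconst would put a mutable variable in a read-only section. */
static const struct pll_config *cpg_pll_config __initdata;

static int __init pick_pll_config(unsigned int mode)
{
	cpg_pll_config = &pll_configs[mode & 1];	/* runtime write */
	return 0;
}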
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index e2999ab2b53c..3ff6ecd61756 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -1180,7 +1180,7 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
s8 monbit = info->resets[id].monbit;
if (info->has_clk_mon_regs) {
- return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
+ return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
} else if (monbit >= 0) {
u32 monbitmask = BIT(monbit);
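[Editor's note] The rzg2l-cpg hunk is a one-character logic fix: a reset controller's `.status` callback must return non-zero while the line is asserted, and the CLK_MRST monitor bit reads 1 in exactly that state. The old single `!` therefore reported the opposite of reality; `!!` merely normalises the masked value to 0/1. In isolation:

#include <linux/bits.h>

/* reset_controller_dev .status convention: >0 means "asserted".
 * The monitor bit is 1 while the module is held in reset, so the
 * value needs normalising, not inverting. */
static int reset_status_from_monitor(u32 monreg, unsigned int bit)
{
	return !!(monreg & BIT(bit));	/* was: !(monreg & BIT(bit)) */
}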
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index b7962e5149a5..001582ea71ba 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -143,17 +143,6 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
&w1_clk.common,
};
-static struct ccu_common *sun50i_h616_r_ccu_clks[] = {
- &r_apb1_clk.common,
- &r_apb2_clk.common,
- &r_apb1_twd_clk.common,
- &r_apb2_i2c_clk.common,
- &r_apb2_rsb_clk.common,
- &r_apb1_ir_clk.common,
- &r_apb1_rtc_clk.common,
- &ir_clk.common,
-};
-
static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
@@ -219,8 +208,8 @@ static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_h616_r_ccu_desc = {
- .ccu_clks = sun50i_h616_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h616_r_ccu_clks),
+ .ccu_clks = sun50i_h6_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h6_r_ccu_clks),
.hw_clks = &sun50i_h616_r_hw_clks,
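[Editor's note] This and the following sunxi-ng diffs all deduplicate per-variant `ccu_clks` arrays into one shared superset. That is safe because, to my understanding of the sunxi-ng core, probe only walks `ccu_clks` to patch each clock's common fields (MMIO base, lock); which clocks a given SoC actually exposes is still decided by its own `hw_clks` table. A self-contained sketch of why a superset is harmless, hypothetical types:

#include <linux/io.h>
#include <linux/spinlock.h>

struct ccu_common { void __iomem *base; spinlock_t *lock; };
struct my_ccu_clk { struct ccu_common common; };

static struct my_ccu_clk clk_a, clk_b, clk_c;

/* Superset shared by every variant: entries a given SoC lacks are
 * initialised here but never published, since the per-variant
 * hw_clks table controls what the clock provider exposes. */
static struct ccu_common *shared_ccu_clks[] = {
	&clk_a.common, &clk_b.common, &clk_c.common,
};

static void ccu_probe_init(struct ccu_common **clks, int n,
			   void __iomem *base, spinlock_t *lock)
{
	int i;

	for (i = 0; i < n; i++) {
		clks[i]->base = base;
		clks[i]->lock = lock;
	}
}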
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 1a5e418923f6..30056da3e0af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -95,13 +95,13 @@ static struct ccu_nkmp pll_periph1_clk = {
},
};
+/* For GPU PLL, using an output divider for DFS causes system to fail */
#define SUN50I_H6_PLL_GPU_REG 0x030
static struct ccu_nkmp pll_gpu_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
- .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
.common = {
.reg = 0x030,
.hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
@@ -294,9 +294,9 @@ static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "psi-ahb1-ahb2",
0x62c, BIT(0), 0);
+/* Keep GPU_CLK divider const to avoid DFS instability. */
static const char * const gpu_parents[] = { "pll-gpu" };
-static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
- 0, 3, /* M */
+static SUNXI_CCU_MUX_WITH_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
24, 1, /* mux */
BIT(31), /* gate */
CLK_SET_RATE_PARENT);
@@ -1191,6 +1191,16 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
+ /* Force PLL_GPU output divider bits to 0 */
+ val = readl(reg + SUN50I_H6_PLL_GPU_REG);
+ val &= ~BIT(0);
+ writel(val, reg + SUN50I_H6_PLL_GPU_REG);
+
+ /* Force GPU_CLK divider bits to 0 */
+ val = readl(reg + gpu_clk.common.reg);
+ val &= ~GENMASK(3, 0);
+ writel(val, reg + gpu_clk.common.reg);
+
/* Enable the lock bits on all PLLs */
for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
val = readl(reg + pll_regs[i]);
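[Editor's note] The H6 GPU change pairs a clock-model change (the PLL's output divider and the GPU mod clock's M divider disappear from the driver's view) with a probe-time fixup that pins the corresponding hardware bits to /1, so whatever the bootloader left behind cannot destabilise DFS. The fixup in isolation, mirroring the hunk above:

#include <linux/bits.h>
#include <linux/io.h>

#define SUN50I_H6_PLL_GPU_REG	0x030
#define SUN50I_H6_GPU_CLK_REG	0x670

static void sun50i_h6_fix_gpu_dividers(void __iomem *reg)
{
	u32 val;

	/* force the PLL-GPU output divider to /1 */
	val = readl(reg + SUN50I_H6_PLL_GPU_REG);
	val &= ~BIT(0);
	writel(val, reg + SUN50I_H6_PLL_GPU_REG);

	/* force the GPU mod clock divider bits to /1 */
	val = readl(reg + SUN50I_H6_GPU_CLK_REG);
	val &= ~GENMASK(3, 0);
	writel(val, reg + SUN50I_H6_GPU_CLK_REG);
}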
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index e7e3ddf4a227..2f6f02f00be2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -53,65 +53,26 @@ static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
static SUNXI_CCU_M(rot_div_a83_clk, "rot-div", "pll-de", 0x0c, 0x0c, 4,
CLK_SET_RATE_PARENT);
-static struct ccu_common *sun8i_a83t_de2_clks[] = {
+static struct ccu_common *sun8i_de2_ccu_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
&wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_a83_clk.common,
- &mixer1_div_a83_clk.common,
- &wb_div_a83_clk.common,
-
- &bus_rot_clk.common,
&rot_clk.common,
- &rot_div_a83_clk.common,
-};
-
-static struct ccu_common *sun8i_h3_de2_clks[] = {
- &mixer0_clk.common,
- &mixer1_clk.common,
- &wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_clk.common,
- &mixer1_div_clk.common,
- &wb_div_clk.common,
-};
-
-static struct ccu_common *sun8i_v3s_de2_clks[] = {
- &mixer0_clk.common,
- &wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_clk.common,
- &wb_div_clk.common,
-};
-
-static struct ccu_common *sun50i_a64_de2_clks[] = {
- &mixer0_clk.common,
- &mixer1_clk.common,
- &wb_clk.common,
&bus_mixer0_clk.common,
&bus_mixer1_clk.common,
&bus_wb_clk.common,
+ &bus_rot_clk.common,
&mixer0_div_clk.common,
&mixer1_div_clk.common,
&wb_div_clk.common,
-
- &bus_rot_clk.common,
- &rot_clk.common,
&rot_div_clk.common,
+
+ &mixer0_div_a83_clk.common,
+ &mixer1_div_a83_clk.common,
+ &wb_div_a83_clk.common,
+ &rot_div_a83_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
@@ -219,8 +180,8 @@ static struct ccu_reset_map sun50i_h5_de2_resets[] = {
};
static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
- .ccu_clks = sun8i_a83t_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_a83t_de2_hw_clks,
@@ -229,8 +190,8 @@ static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_h3_de2_hw_clks,
@@ -239,8 +200,8 @@ static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
- .ccu_clks = sun50i_a64_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun50i_a64_de2_hw_clks,
@@ -249,8 +210,8 @@ static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
- .ccu_clks = sun8i_v3s_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_v3s_de2_hw_clks,
@@ -259,8 +220,8 @@ static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
- .ccu_clks = sun50i_a64_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun50i_a64_de2_hw_clks,
@@ -269,8 +230,8 @@ static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_h3_de2_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index e058cf691aea..d3fcb983c17c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -562,6 +562,7 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = {
&bus_uart2_clk.common,
&bus_uart3_clk.common,
&bus_scr0_clk.common,
+ &bus_scr1_clk.common,
&bus_ephy_clk.common,
&bus_dbg_clk.common,
&ths_clk.common,
@@ -612,114 +613,6 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = {
&gpu_clk.common,
};
-static struct ccu_common *sun50i_h5_ccu_clks[] = {
- &pll_cpux_clk.common,
- &pll_audio_base_clk.common,
- &pll_video_clk.common,
- &pll_ve_clk.common,
- &pll_ddr_clk.common,
- &pll_periph0_clk.common,
- &pll_gpu_clk.common,
- &pll_periph1_clk.common,
- &pll_de_clk.common,
- &cpux_clk.common,
- &axi_clk.common,
- &ahb1_clk.common,
- &apb1_clk.common,
- &apb2_clk.common,
- &ahb2_clk.common,
- &bus_ce_clk.common,
- &bus_dma_clk.common,
- &bus_mmc0_clk.common,
- &bus_mmc1_clk.common,
- &bus_mmc2_clk.common,
- &bus_nand_clk.common,
- &bus_dram_clk.common,
- &bus_emac_clk.common,
- &bus_ts_clk.common,
- &bus_hstimer_clk.common,
- &bus_spi0_clk.common,
- &bus_spi1_clk.common,
- &bus_otg_clk.common,
- &bus_ehci0_clk.common,
- &bus_ehci1_clk.common,
- &bus_ehci2_clk.common,
- &bus_ehci3_clk.common,
- &bus_ohci0_clk.common,
- &bus_ohci1_clk.common,
- &bus_ohci2_clk.common,
- &bus_ohci3_clk.common,
- &bus_ve_clk.common,
- &bus_tcon0_clk.common,
- &bus_tcon1_clk.common,
- &bus_deinterlace_clk.common,
- &bus_csi_clk.common,
- &bus_tve_clk.common,
- &bus_hdmi_clk.common,
- &bus_de_clk.common,
- &bus_gpu_clk.common,
- &bus_msgbox_clk.common,
- &bus_spinlock_clk.common,
- &bus_codec_clk.common,
- &bus_spdif_clk.common,
- &bus_pio_clk.common,
- &bus_ths_clk.common,
- &bus_i2s0_clk.common,
- &bus_i2s1_clk.common,
- &bus_i2s2_clk.common,
- &bus_i2c0_clk.common,
- &bus_i2c1_clk.common,
- &bus_i2c2_clk.common,
- &bus_uart0_clk.common,
- &bus_uart1_clk.common,
- &bus_uart2_clk.common,
- &bus_uart3_clk.common,
- &bus_scr0_clk.common,
- &bus_scr1_clk.common,
- &bus_ephy_clk.common,
- &bus_dbg_clk.common,
- &ths_clk.common,
- &nand_clk.common,
- &mmc0_clk.common,
- &mmc1_clk.common,
- &mmc2_clk.common,
- &ts_clk.common,
- &ce_clk.common,
- &spi0_clk.common,
- &spi1_clk.common,
- &i2s0_clk.common,
- &i2s1_clk.common,
- &i2s2_clk.common,
- &spdif_clk.common,
- &usb_phy0_clk.common,
- &usb_phy1_clk.common,
- &usb_phy2_clk.common,
- &usb_phy3_clk.common,
- &usb_ohci0_clk.common,
- &usb_ohci1_clk.common,
- &usb_ohci2_clk.common,
- &usb_ohci3_clk.common,
- &dram_clk.common,
- &dram_ve_clk.common,
- &dram_csi_clk.common,
- &dram_deinterlace_clk.common,
- &dram_ts_clk.common,
- &de_clk.common,
- &tcon_clk.common,
- &tve_clk.common,
- &deinterlace_clk.common,
- &csi_misc_clk.common,
- &csi_sclk_clk.common,
- &csi_mclk_clk.common,
- &ve_clk.common,
- &ac_dig_clk.common,
- &avs_clk.common,
- &hdmi_clk.common,
- &hdmi_ddc_clk.common,
- &mbus_clk.common,
- &gpu_clk.common,
-};
-
static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
@@ -1116,8 +1009,8 @@ static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_h5_ccu_desc = {
- .ccu_clks = sun50i_h5_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h5_ccu_clks),
+ .ccu_clks = sun8i_h3_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_h3_ccu_clks),
.hw_clks = &sun50i_h5_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 5b7fab832a52..4221649b311f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -114,32 +114,7 @@ static struct ccu_mp a83t_ir_clk = {
},
};
-static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
- &ar100_clk.common,
- &apb0_clk.common,
- &apb0_pio_clk.common,
- &apb0_ir_clk.common,
- &apb0_timer_clk.common,
- &apb0_rsb_clk.common,
- &apb0_uart_clk.common,
- &apb0_i2c_clk.common,
- &apb0_twd_clk.common,
- &a83t_ir_clk.common,
-};
-
-static struct ccu_common *sun8i_h3_r_ccu_clks[] = {
- &ar100_clk.common,
- &apb0_clk.common,
- &apb0_pio_clk.common,
- &apb0_ir_clk.common,
- &apb0_timer_clk.common,
- &apb0_uart_clk.common,
- &apb0_i2c_clk.common,
- &apb0_twd_clk.common,
- &ir_clk.common,
-};
-
-static struct ccu_common *sun50i_a64_r_ccu_clks[] = {
+static struct ccu_common *sun8i_r_ccu_clks[] = {
&ar100_clk.common,
&apb0_clk.common,
&apb0_pio_clk.common,
@@ -150,6 +125,7 @@ static struct ccu_common *sun50i_a64_r_ccu_clks[] = {
&apb0_i2c_clk.common,
&apb0_twd_clk.common,
&ir_clk.common,
+ &a83t_ir_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
@@ -226,8 +202,8 @@ static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
};
static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = {
- .ccu_clks = sun8i_a83t_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun8i_a83t_r_hw_clks,
@@ -236,8 +212,8 @@ static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = {
- .ccu_clks = sun8i_h3_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun8i_h3_r_hw_clks,
@@ -246,8 +222,8 @@ static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = {
- .ccu_clks = sun50i_a64_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun50i_a64_r_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 87f87d6ea3ad..fbb3529f0d3e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -421,6 +421,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&bus_de_clk.common,
&bus_codec_clk.common,
&bus_pio_clk.common,
+ &bus_i2s0_clk.common,
&bus_i2c0_clk.common,
&bus_i2c1_clk.common,
&bus_uart0_clk.common,
@@ -439,6 +440,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&mmc2_output_clk.common,
&ce_clk.common,
&spi0_clk.common,
+ &i2s0_clk.common,
&usb_phy0_clk.common,
&usb_ohci0_clk.common,
&dram_clk.common,
@@ -463,80 +465,6 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
-static struct ccu_common *sun8i_v3_ccu_clks[] = {
- &pll_cpu_clk.common,
- &pll_audio_base_clk.common,
- &pll_video_clk.common,
- &pll_ve_clk.common,
- &pll_ddr0_clk.common,
- &pll_periph0_clk.common,
- &pll_isp_clk.common,
- &pll_periph1_clk.common,
- &pll_ddr1_clk.common,
- &cpu_clk.common,
- &axi_clk.common,
- &ahb1_clk.common,
- &apb1_clk.common,
- &apb2_clk.common,
- &ahb2_clk.common,
- &bus_ce_clk.common,
- &bus_dma_clk.common,
- &bus_mmc0_clk.common,
- &bus_mmc1_clk.common,
- &bus_mmc2_clk.common,
- &bus_dram_clk.common,
- &bus_emac_clk.common,
- &bus_hstimer_clk.common,
- &bus_spi0_clk.common,
- &bus_otg_clk.common,
- &bus_ehci0_clk.common,
- &bus_ohci0_clk.common,
- &bus_ve_clk.common,
- &bus_tcon0_clk.common,
- &bus_csi_clk.common,
- &bus_de_clk.common,
- &bus_codec_clk.common,
- &bus_pio_clk.common,
- &bus_i2s0_clk.common,
- &bus_i2c0_clk.common,
- &bus_i2c1_clk.common,
- &bus_uart0_clk.common,
- &bus_uart1_clk.common,
- &bus_uart2_clk.common,
- &bus_ephy_clk.common,
- &bus_dbg_clk.common,
- &mmc0_clk.common,
- &mmc0_sample_clk.common,
- &mmc0_output_clk.common,
- &mmc1_clk.common,
- &mmc1_sample_clk.common,
- &mmc1_output_clk.common,
- &mmc2_clk.common,
- &mmc2_sample_clk.common,
- &mmc2_output_clk.common,
- &ce_clk.common,
- &spi0_clk.common,
- &i2s0_clk.common,
- &usb_phy0_clk.common,
- &usb_ohci0_clk.common,
- &dram_clk.common,
- &dram_ve_clk.common,
- &dram_csi_clk.common,
- &dram_ohci_clk.common,
- &dram_ehci_clk.common,
- &de_clk.common,
- &tcon_clk.common,
- &csi_misc_clk.common,
- &csi0_mclk_clk.common,
- &csi1_sclk_clk.common,
- &csi1_mclk_clk.common,
- &ve_clk.common,
- &ac_dig_clk.common,
- &avs_clk.common,
- &mbus_clk.common,
- &mipi_csi_clk.common,
-};
-
/* We hardcode the divider to 1 for SDM support */
static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio",
clk_parent_pll_audio,
@@ -798,8 +726,8 @@ static const struct sunxi_ccu_desc sun8i_v3s_ccu_desc = {
};
static const struct sunxi_ccu_desc sun8i_v3_ccu_desc = {
- .ccu_clks = sun8i_v3_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_v3_ccu_clks),
+ .ccu_clks = sun8i_v3s_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_ccu_clks),
.hw_clks = &sun8i_v3_hw_clks,
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 3fba3d3ac9a2..1c4e543366dd 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
- depends on ARCH_SUNXI || COMPILE_TEST
+ depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
default y
if CLK_SUNXI
@@ -19,7 +19,6 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- select MFD_SUN6I_PRCM
default y
help
Legacy clock driver for the A31 PRCM clocks. Those are
@@ -27,7 +26,6 @@ config CLK_SUNXI_PRCM_SUN6I
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- select MFD_SUN6I_PRCM
default y
help
Legacy clock driver for the sun8i family PRCM clocks.
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index d078e5d73ed9..868bc7af21b0 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -56,7 +56,7 @@ static const struct omap_clkctrl_bit_data omap4_aess_bit_data[] __initconst = {
};
static const char * const omap4_func_dmic_abe_gfclk_parents[] __initconst = {
- "abe_cm:clk:0018:26",
+ "abe-clkctrl:0018:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -76,7 +76,7 @@ static const struct omap_clkctrl_bit_data omap4_dmic_bit_data[] __initconst = {
};
static const char * const omap4_func_mcasp_abe_gfclk_parents[] __initconst = {
- "abe_cm:clk:0020:26",
+ "abe-clkctrl:0020:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -89,7 +89,7 @@ static const struct omap_clkctrl_bit_data omap4_mcasp_bit_data[] __initconst = {
};
static const char * const omap4_func_mcbsp1_gfclk_parents[] __initconst = {
- "abe_cm:clk:0028:26",
+ "abe-clkctrl:0028:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -102,7 +102,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp1_bit_data[] __initconst =
};
static const char * const omap4_func_mcbsp2_gfclk_parents[] __initconst = {
- "abe_cm:clk:0030:26",
+ "abe-clkctrl:0030:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -115,7 +115,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp2_bit_data[] __initconst =
};
static const char * const omap4_func_mcbsp3_gfclk_parents[] __initconst = {
- "abe_cm:clk:0038:26",
+ "abe-clkctrl:0038:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -183,18 +183,18 @@ static const struct omap_clkctrl_bit_data omap4_timer8_bit_data[] __initconst =
static const struct omap_clkctrl_reg_data omap4_abe_clkctrl_regs[] __initconst = {
{ OMAP4_L4_ABE_CLKCTRL, NULL, 0, "ocp_abe_iclk" },
- { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
+ { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
{ OMAP4_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
- { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
- { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe_cm:clk:0020:24" },
- { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
- { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
- { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
- { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0040:8" },
- { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
- { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
- { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
- { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
+ { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe-clkctrl:0020:24" },
+ { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0040:8" },
+ { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
{ OMAP4_WD_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
};
@@ -287,7 +287,7 @@ static const struct omap_clkctrl_bit_data omap4_fdif_bit_data[] __initconst = {
static const struct omap_clkctrl_reg_data omap4_iss_clkctrl_regs[] __initconst = {
{ OMAP4_ISS_CLKCTRL, omap4_iss_bit_data, CLKF_SW_SUP, "ducati_clk_mux_ck" },
- { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss_cm:clk:0008:24" },
+ { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss-clkctrl:0008:24" },
{ 0 },
};
@@ -320,7 +320,7 @@ static const struct omap_clkctrl_bit_data omap4_dss_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_l3_dss_clkctrl_regs[] __initconst = {
- { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3_dss_cm:clk:0000:8" },
+ { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3-dss-clkctrl:0000:8" },
{ 0 },
};
@@ -336,7 +336,7 @@ static const struct omap_clkctrl_bit_data omap4_gpu_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data omap4_l3_gfx_clkctrl_regs[] __initconst = {
- { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3_gfx_cm:clk:0000:24" },
+ { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3-gfx-clkctrl:0000:24" },
{ 0 },
};
@@ -372,12 +372,12 @@ static const struct omap_clkctrl_bit_data omap4_hsi_bit_data[] __initconst = {
};
static const char * const omap4_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
- "l3_init_cm:clk:0038:24",
+ "l3-init-clkctrl:0038:24",
NULL,
};
static const char * const omap4_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
- "l3_init_cm:clk:0038:25",
+ "l3-init-clkctrl:0038:25",
NULL,
};
@@ -418,7 +418,7 @@ static const struct omap_clkctrl_bit_data omap4_usb_host_hs_bit_data[] __initcon
};
static const char * const omap4_usb_otg_hs_xclk_parents[] __initconst = {
- "l3_init_cm:clk:0040:24",
+ "l3-init-clkctrl:0040:24",
NULL,
};
@@ -452,14 +452,14 @@ static const struct omap_clkctrl_bit_data omap4_ocp2scp_usb_phy_bit_data[] __ini
};
static const struct omap_clkctrl_reg_data omap4_l3_init_clkctrl_regs[] __initconst = {
- { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0008:24" },
- { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0010:24" },
- { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:0018:24" },
+ { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0008:24" },
+ { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0010:24" },
+ { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:0018:24" },
{ OMAP4_USB_HOST_HS_CLKCTRL, omap4_usb_host_hs_bit_data, CLKF_SW_SUP, "init_60m_fclk" },
{ OMAP4_USB_OTG_HS_CLKCTRL, omap4_usb_otg_hs_bit_data, CLKF_HW_SUP, "l3_div_ck" },
{ OMAP4_USB_TLL_HS_CLKCTRL, omap4_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_div_ck" },
{ OMAP4_USB_HOST_FS_CLKCTRL, NULL, CLKF_SW_SUP, "func_48mc_fclk" },
- { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:00c0:8" },
+ { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:00c0:8" },
{ 0 },
};
@@ -530,7 +530,7 @@ static const struct omap_clkctrl_bit_data omap4_gpio6_bit_data[] __initconst = {
};
static const char * const omap4_per_mcbsp4_gfclk_parents[] __initconst = {
- "l4_per_cm:clk:00c0:26",
+ "l4-per-clkctrl:00c0:26",
"pad_clks_ck",
NULL,
};
@@ -570,12 +570,12 @@ static const struct omap_clkctrl_bit_data omap4_slimbus2_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initconst = {
- { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0008:24" },
- { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0010:24" },
- { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0018:24" },
- { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0020:24" },
- { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0028:24" },
- { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0030:24" },
+ { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0008:24" },
+ { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0010:24" },
+ { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0018:24" },
+ { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0020:24" },
+ { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0028:24" },
+ { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0030:24" },
{ OMAP4_ELM_CLKCTRL, NULL, 0, "l4_div_ck" },
{ OMAP4_GPIO2_CLKCTRL, omap4_gpio2_bit_data, CLKF_HW_SUP, "l4_div_ck" },
{ OMAP4_GPIO3_CLKCTRL, omap4_gpio3_bit_data, CLKF_HW_SUP, "l4_div_ck" },
@@ -588,14 +588,14 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
{ OMAP4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
{ OMAP4_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
{ OMAP4_L4_PER_CLKCTRL, NULL, 0, "l4_div_ck" },
- { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:00c0:24" },
+ { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:00c0:24" },
{ OMAP4_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
- { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0118:8" },
+ { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0118:8" },
{ OMAP4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
@@ -630,7 +630,7 @@ static const struct omap_clkctrl_reg_data omap4_l4_wkup_clkctrl_regs[] __initcon
{ OMAP4_L4_WKUP_CLKCTRL, NULL, 0, "l4_wkup_clk_mux_ck" },
{ OMAP4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ OMAP4_GPIO1_CLKCTRL, omap4_gpio1_bit_data, CLKF_HW_SUP, "l4_wkup_clk_mux_ck" },
- { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0020:24" },
+ { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4-wkup-clkctrl:0020:24" },
{ OMAP4_COUNTER_32K_CLKCTRL, NULL, 0, "sys_32k_ck" },
{ OMAP4_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
@@ -644,7 +644,7 @@ static const char * const omap4_pmd_stm_clock_mux_ck_parents[] __initconst = {
};
static const char * const omap4_trace_clk_div_div_ck_parents[] __initconst = {
- "emu_sys_cm:clk:0000:22",
+ "emu-sys-clkctrl:0000:22",
NULL,
};
@@ -662,7 +662,7 @@ static const struct omap_clkctrl_div_data omap4_trace_clk_div_div_ck_data __init
};
static const char * const omap4_stm_clk_div_ck_parents[] __initconst = {
- "emu_sys_cm:clk:0000:20",
+ "emu-sys-clkctrl:0000:20",
NULL,
};
@@ -716,73 +716,73 @@ static struct ti_dt_clk omap44xx_clks[] = {
* hwmod support. Once hwmod is removed, these can be removed
* also.
*/
- DT_CLK(NULL, "aess_fclk", "abe_cm:0008:24"),
- DT_CLK(NULL, "cm2_dm10_mux", "l4_per_cm:0008:24"),
- DT_CLK(NULL, "cm2_dm11_mux", "l4_per_cm:0010:24"),
- DT_CLK(NULL, "cm2_dm2_mux", "l4_per_cm:0018:24"),
- DT_CLK(NULL, "cm2_dm3_mux", "l4_per_cm:0020:24"),
- DT_CLK(NULL, "cm2_dm4_mux", "l4_per_cm:0028:24"),
- DT_CLK(NULL, "cm2_dm9_mux", "l4_per_cm:0030:24"),
- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
- DT_CLK(NULL, "dmt1_clk_mux", "l4_wkup_cm:0020:24"),
- DT_CLK(NULL, "dss_48mhz_clk", "l3_dss_cm:0000:9"),
- DT_CLK(NULL, "dss_dss_clk", "l3_dss_cm:0000:8"),
- DT_CLK(NULL, "dss_sys_clk", "l3_dss_cm:0000:10"),
- DT_CLK(NULL, "dss_tv_clk", "l3_dss_cm:0000:11"),
- DT_CLK(NULL, "fdif_fck", "iss_cm:0008:24"),
- DT_CLK(NULL, "func_dmic_abe_gfclk", "abe_cm:0018:24"),
- DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe_cm:0020:24"),
- DT_CLK(NULL, "func_mcbsp1_gfclk", "abe_cm:0028:24"),
- DT_CLK(NULL, "func_mcbsp2_gfclk", "abe_cm:0030:24"),
- DT_CLK(NULL, "func_mcbsp3_gfclk", "abe_cm:0038:24"),
- DT_CLK(NULL, "gpio1_dbclk", "l4_wkup_cm:0018:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0040:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0048:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0050:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0058:8"),
- DT_CLK(NULL, "gpio6_dbclk", "l4_per_cm:0060:8"),
- DT_CLK(NULL, "hsi_fck", "l3_init_cm:0018:24"),
- DT_CLK(NULL, "hsmmc1_fclk", "l3_init_cm:0008:24"),
- DT_CLK(NULL, "hsmmc2_fclk", "l3_init_cm:0010:24"),
- DT_CLK(NULL, "iss_ctrlclk", "iss_cm:0000:8"),
- DT_CLK(NULL, "mcasp_sync_mux_ck", "abe_cm:0020:26"),
- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
- DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4_per_cm:00c0:26"),
- DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3_init_cm:00c0:8"),
- DT_CLK(NULL, "otg_60m_gfclk", "l3_init_cm:0040:24"),
- DT_CLK(NULL, "per_mcbsp4_gfclk", "l4_per_cm:00c0:24"),
- DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu_sys_cm:0000:20"),
- DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu_sys_cm:0000:22"),
- DT_CLK(NULL, "sgx_clk_mux", "l3_gfx_cm:0000:24"),
- DT_CLK(NULL, "slimbus1_fclk_0", "abe_cm:0040:8"),
- DT_CLK(NULL, "slimbus1_fclk_1", "abe_cm:0040:9"),
- DT_CLK(NULL, "slimbus1_fclk_2", "abe_cm:0040:10"),
- DT_CLK(NULL, "slimbus1_slimbus_clk", "abe_cm:0040:11"),
- DT_CLK(NULL, "slimbus2_fclk_0", "l4_per_cm:0118:8"),
- DT_CLK(NULL, "slimbus2_fclk_1", "l4_per_cm:0118:9"),
- DT_CLK(NULL, "slimbus2_slimbus_clk", "l4_per_cm:0118:10"),
- DT_CLK(NULL, "stm_clk_div_ck", "emu_sys_cm:0000:27"),
- DT_CLK(NULL, "timer5_sync_mux", "abe_cm:0048:24"),
- DT_CLK(NULL, "timer6_sync_mux", "abe_cm:0050:24"),
- DT_CLK(NULL, "timer7_sync_mux", "abe_cm:0058:24"),
- DT_CLK(NULL, "timer8_sync_mux", "abe_cm:0060:24"),
- DT_CLK(NULL, "trace_clk_div_div_ck", "emu_sys_cm:0000:24"),
- DT_CLK(NULL, "usb_host_hs_func48mclk", "l3_init_cm:0038:15"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3_init_cm:0038:13"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3_init_cm:0038:14"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3_init_cm:0038:11"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3_init_cm:0038:12"),
- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3_init_cm:0038:8"),
- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3_init_cm:0038:9"),
- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init_cm:0038:10"),
- DT_CLK(NULL, "usb_otg_hs_xclk", "l3_init_cm:0040:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3_init_cm:0048:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3_init_cm:0048:9"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3_init_cm:0048:10"),
- DT_CLK(NULL, "utmi_p1_gfclk", "l3_init_cm:0038:24"),
- DT_CLK(NULL, "utmi_p2_gfclk", "l3_init_cm:0038:25"),
+ DT_CLK(NULL, "aess_fclk", "abe-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm10_mux", "l4-per-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm11_mux", "l4-per-clkctrl:0010:24"),
+ DT_CLK(NULL, "cm2_dm2_mux", "l4-per-clkctrl:0018:24"),
+ DT_CLK(NULL, "cm2_dm3_mux", "l4-per-clkctrl:0020:24"),
+ DT_CLK(NULL, "cm2_dm4_mux", "l4-per-clkctrl:0028:24"),
+ DT_CLK(NULL, "cm2_dm9_mux", "l4-per-clkctrl:0030:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dmt1_clk_mux", "l4-wkup-clkctrl:0020:24"),
+ DT_CLK(NULL, "dss_48mhz_clk", "l3-dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "l3-dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "l3-dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "dss_tv_clk", "l3-dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "fdif_fck", "iss-clkctrl:0008:24"),
+ DT_CLK(NULL, "func_dmic_abe_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe-clkctrl:0020:24"),
+ DT_CLK(NULL, "func_mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "func_mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "func_mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4-wkup-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4-per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4-per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4-per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4-per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4-per-clkctrl:0060:8"),
+ DT_CLK(NULL, "hsi_fck", "l3-init-clkctrl:0018:24"),
+ DT_CLK(NULL, "hsmmc1_fclk", "l3-init-clkctrl:0008:24"),
+ DT_CLK(NULL, "hsmmc2_fclk", "l3-init-clkctrl:0010:24"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss-clkctrl:0000:8"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "abe-clkctrl:0020:26"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
+ DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
+ DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
+ DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
+ DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
+ DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
+ DT_CLK(NULL, "sgx_clk_mux", "l3-gfx-clkctrl:0000:24"),
+ DT_CLK(NULL, "slimbus1_fclk_0", "abe-clkctrl:0040:8"),
+ DT_CLK(NULL, "slimbus1_fclk_1", "abe-clkctrl:0040:9"),
+ DT_CLK(NULL, "slimbus1_fclk_2", "abe-clkctrl:0040:10"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "abe-clkctrl:0040:11"),
+ DT_CLK(NULL, "slimbus2_fclk_0", "l4-per-clkctrl:0118:8"),
+ DT_CLK(NULL, "slimbus2_fclk_1", "l4-per-clkctrl:0118:9"),
+ DT_CLK(NULL, "slimbus2_slimbus_clk", "l4-per-clkctrl:0118:10"),
+ DT_CLK(NULL, "stm_clk_div_ck", "emu-sys-clkctrl:0000:27"),
+ DT_CLK(NULL, "timer5_sync_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_sync_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_sync_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_sync_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "trace_clk_div_div_ck", "emu-sys-clkctrl:0000:24"),
+ DT_CLK(NULL, "usb_host_hs_func48mclk", "l3-init-clkctrl:0038:15"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3-init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3-init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3-init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3-init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3-init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3-init-clkctrl:0038:9"),
+	DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3-init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_hs_xclk", "l3-init-clkctrl:0040:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3-init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3-init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3-init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3-init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3-init-clkctrl:0038:25"),
{ .node_name = NULL },
};
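The renames above move the OMAP4 clkctrl clock identifiers from the legacy
"<prefix>_cm:<offset>:<bit>" form to names derived from the devicetree node
name, "<node>-clkctrl:<offset>:<bit>". A minimal sketch of how such an
identifier is composed (the helper name and signature are hypothetical, not
part of the patch):

    /* Hypothetical helper: builds e.g. "abe-clkctrl:0008:24" from the
     * clkctrl provider node name, the register offset of the module
     * within the instance, and the gate/mux bit position.
     */
    static void clkctrl_clk_name(char *buf, size_t len, const char *node_name,
                                 unsigned int offset, unsigned int bit)
    {
            snprintf(buf, len, "%s:%04x:%u", node_name, offset, bit);
    }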
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 90e0a9ea6351..b4aff76eb373 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -50,7 +50,7 @@ static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
};
static const char * const omap5_dmic_gfclk_parents[] __initconst = {
- "abe_cm:clk:0018:26",
+ "abe-clkctrl:0018:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -70,7 +70,7 @@ static const struct omap_clkctrl_bit_data omap5_dmic_bit_data[] __initconst = {
};
static const char * const omap5_mcbsp1_gfclk_parents[] __initconst = {
- "abe_cm:clk:0028:26",
+ "abe-clkctrl:0028:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -83,7 +83,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp1_bit_data[] __initconst =
};
static const char * const omap5_mcbsp2_gfclk_parents[] __initconst = {
- "abe_cm:clk:0030:26",
+ "abe-clkctrl:0030:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -96,7 +96,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp2_bit_data[] __initconst =
};
static const char * const omap5_mcbsp3_gfclk_parents[] __initconst = {
- "abe_cm:clk:0038:26",
+ "abe-clkctrl:0038:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -136,16 +136,16 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst =
static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
{ OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
- { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
+ { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
{ OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
- { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
- { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
- { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
- { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
- { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
- { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
- { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
- { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
+ { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
{ 0 },
};
@@ -268,12 +268,12 @@ static const struct omap_clkctrl_bit_data omap5_gpio8_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst = {
- { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0008:24" },
- { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0010:24" },
- { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0018:24" },
- { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0020:24" },
- { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" },
- { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" },
+ { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
+ { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
+ { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
+ { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
+ { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
+ { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0030:24" },
{ OMAP5_GPIO2_CLKCTRL, omap5_gpio2_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_GPIO3_CLKCTRL, omap5_gpio3_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_GPIO4_CLKCTRL, omap5_gpio4_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
@@ -345,7 +345,7 @@ static const struct omap_clkctrl_bit_data omap5_dss_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap5_dss_clkctrl_regs[] __initconst = {
- { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" },
+ { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
{ 0 },
};
@@ -378,7 +378,7 @@ static const struct omap_clkctrl_bit_data omap5_gpu_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap5_gpu_clkctrl_regs[] __initconst = {
- { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24" },
+ { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24" },
{ 0 },
};
@@ -389,7 +389,7 @@ static const char * const omap5_mmc1_fclk_mux_parents[] __initconst = {
};
static const char * const omap5_mmc1_fclk_parents[] __initconst = {
- "l3init_cm:clk:0008:24",
+ "l3init-clkctrl:0008:24",
NULL,
};
@@ -405,7 +405,7 @@ static const struct omap_clkctrl_bit_data omap5_mmc1_bit_data[] __initconst = {
};
static const char * const omap5_mmc2_fclk_parents[] __initconst = {
- "l3init_cm:clk:0010:24",
+ "l3init-clkctrl:0010:24",
NULL,
};
@@ -430,12 +430,12 @@ static const char * const omap5_usb_host_hs_hsic480m_p3_clk_parents[] __initcons
};
static const char * const omap5_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
- "l3init_cm:clk:0038:24",
+ "l3init-clkctrl:0038:24",
NULL,
};
static const char * const omap5_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
- "l3init_cm:clk:0038:25",
+ "l3init-clkctrl:0038:25",
NULL,
};
@@ -494,8 +494,8 @@ static const struct omap_clkctrl_bit_data omap5_usb_otg_ss_bit_data[] __initcons
};
static const struct omap_clkctrl_reg_data omap5_l3init_clkctrl_regs[] __initconst = {
- { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" },
- { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
+ { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
+ { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
{ OMAP5_USB_HOST_HS_CLKCTRL, omap5_usb_host_hs_bit_data, CLKF_SW_SUP, "l3init_60m_fclk" },
{ OMAP5_USB_TLL_HS_CLKCTRL, omap5_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_SATA_CLKCTRL, omap5_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
@@ -519,7 +519,7 @@ static const struct omap_clkctrl_reg_data omap5_wkupaon_clkctrl_regs[] __initcon
{ OMAP5_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ OMAP5_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ OMAP5_GPIO1_CLKCTRL, omap5_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
- { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
+ { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
{ OMAP5_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ OMAP5_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
@@ -549,58 +549,58 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
DT_CLK(NULL, "sys_clkin_ck", "sys_clkin"),
- DT_CLK(NULL, "dmic_gfclk", "abe_cm:0018:24"),
- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
- DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"),
- DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"),
- DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"),
- DT_CLK(NULL, "dss_sys_clk", "dss_cm:0000:10"),
- DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0040:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0048:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0050:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0058:8"),
- DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0060:8"),
- DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:00f0:8"),
- DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:00f8:8"),
- DT_CLK(NULL, "mcbsp1_gfclk", "abe_cm:0028:24"),
- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
- DT_CLK(NULL, "mcbsp2_gfclk", "abe_cm:0030:24"),
- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
- DT_CLK(NULL, "mcbsp3_gfclk", "abe_cm:0038:24"),
- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
- DT_CLK(NULL, "mmc1_32khz_clk", "l3init_cm:0008:8"),
- DT_CLK(NULL, "mmc1_fclk", "l3init_cm:0008:25"),
- DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"),
- DT_CLK(NULL, "mmc2_fclk", "l3init_cm:0010:25"),
- DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"),
- DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"),
- DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0008:24"),
- DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0010:24"),
- DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"),
- DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0018:24"),
- DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0020:24"),
- DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0028:24"),
- DT_CLK(NULL, "timer5_gfclk_mux", "abe_cm:0048:24"),
- DT_CLK(NULL, "timer6_gfclk_mux", "abe_cm:0050:24"),
- DT_CLK(NULL, "timer7_gfclk_mux", "abe_cm:0058:24"),
- DT_CLK(NULL, "timer8_gfclk_mux", "abe_cm:0060:24"),
- DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0030:24"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init_cm:0038:13"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init_cm:0038:14"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init_cm:0038:7"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init_cm:0038:11"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init_cm:0038:12"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init_cm:0038:6"),
- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init_cm:0038:8"),
- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init_cm:0038:9"),
- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init_cm:0038:10"),
- DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init_cm:00d0:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init_cm:0048:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init_cm:0048:9"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init_cm:0048:10"),
- DT_CLK(NULL, "utmi_p1_gfclk", "l3init_cm:0038:24"),
- DT_CLK(NULL, "utmi_p2_gfclk", "l3init_cm:0038:25"),
+ DT_CLK(NULL, "dmic_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0060:8"),
+ DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00f0:8"),
+ DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
+ DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0028:24"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0030:24"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init-clkctrl:0038:7"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init-clkctrl:0038:6"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init-clkctrl:0038:9"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init-clkctrl:00d0:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3init-clkctrl:0038:25"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 3b5ebae3740f..ae5862879417 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -520,10 +520,6 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
char *c;
u16 soc_mask = 0;
- if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
- of_node_name_eq(node, "clk"))
- ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;
-
addrp = of_get_address(node, 0, NULL, NULL);
addr = (u32)of_translate_address(node, addrp);
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index 1244c4e568ff..c2088b3c4081 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-fch.o
-clk-x86-lpss-y := clk-lpss-atom.o
-obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
+obj-$(CONFIG_X86_INTEL_LPSS) += clk-lpss-atom.o clk-pmc-atom.o
obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 593d5a957b69..969a552da8d2 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -7,6 +7,9 @@
* either be read from the "time" and "timeh" CSRs, and can use the SBI to
* setup events, or directly accessed using MMIO registers.
*/
+
+#define pr_fmt(fmt) "riscv-timer: " fmt
+
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -20,14 +23,28 @@
#include <linux/of_irq.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
+#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/timex.h>
+static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
+
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
+ u64 next_tval = get_cycles64() + delta;
+
csr_set(CSR_IE, IE_TIE);
- sbi_set_timer(get_cycles64() + delta);
+ if (static_branch_likely(&riscv_sstc_available)) {
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
+ csr_write(CSR_STIMECMPH, next_tval >> 32);
+#else
+ csr_write(CSR_STIMECMP, next_tval);
+#endif
+ } else
+ sbi_set_timer(next_tval);
+
return 0;
}
@@ -101,20 +118,21 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
static int __init riscv_timer_init_dt(struct device_node *n)
{
- int cpuid, hartid, error;
+ int cpuid, error;
+ unsigned long hartid;
struct device_node *child;
struct irq_domain *domain;
- hartid = riscv_of_processor_hartid(n);
- if (hartid < 0) {
- pr_warn("Not valid hartid for node [%pOF] error = [%d]\n",
+ error = riscv_of_processor_hartid(n, &hartid);
+ if (error < 0) {
+ pr_warn("Not valid hartid for node [%pOF] error = [%lu]\n",
n, hartid);
- return hartid;
+ return error;
}
cpuid = riscv_hartid_to_cpuid(hartid);
if (cpuid < 0) {
- pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
+ pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
return cpuid;
}
@@ -140,7 +158,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return -ENODEV;
}
- pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
+ pr_info("%s: Registering clocksource cpuid [%d] hartid [%lu]\n",
__func__, cpuid, hartid);
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
if (error) {
@@ -165,6 +183,12 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (error)
pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
error);
+
+ if (riscv_isa_extension_available(NULL, SSTC)) {
+ pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+ static_branch_enable(&riscv_sstc_available);
+ }
+
return error;
}
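The net effect of this change: when the Sstc extension is present, S-mode arms
the timer by writing the stimecmp CSR directly instead of making an SBI ecall.
A condensed sketch of the two paths (RV32's split hi/lo write omitted):

    u64 next = get_cycles64() + delta;

    if (static_branch_likely(&riscv_sstc_available))
            csr_write(CSR_STIMECMP, next);  /* direct CSR write, no trap to SBI */
    else
            sbi_set_timer(next);            /* legacy path: ecall into firmware */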
diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c
index 700982464c53..020b3d1e1ac0 100644
--- a/drivers/comedi/drivers/comedi_isadma.c
+++ b/drivers/comedi/drivers/comedi_isadma.c
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <asm/dma.h>
+#include <linux/isa-dma.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_isadma.h>
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8fcaba541539..d69d13a26414 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -29,9 +29,9 @@ struct private_data {
cpumask_var_t cpus;
struct device *cpu_dev;
- struct opp_table *opp_table;
struct cpufreq_frequency_table *freq_table;
bool have_static_opps;
+ int opp_token;
};
static LIST_HEAD(priv_list);
@@ -193,7 +193,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
struct private_data *priv;
struct device *cpu_dev;
bool fallback = false;
- const char *reg_name;
+ const char *reg_name[] = { NULL, NULL };
int ret;
/* Check if this CPU is already covered by some other policy */
@@ -218,12 +218,11 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
* OPP layer will be taking care of regulators now, but it needs to know
* the name of the regulator first.
*/
- reg_name = find_supply_name(cpu_dev);
- if (reg_name) {
- priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
- 1);
- if (IS_ERR(priv->opp_table)) {
- ret = PTR_ERR(priv->opp_table);
+ reg_name[0] = find_supply_name(cpu_dev);
+ if (reg_name[0]) {
+ priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
+ if (priv->opp_token < 0) {
+ ret = priv->opp_token;
if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to set regulators: %d\n",
ret);
@@ -295,7 +294,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
out:
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask:
free_cpumask_var(priv->cpus);
return ret;
@@ -309,7 +308,7 @@ static void dt_cpufreq_release(void)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask_var(priv->cpus);
list_del(&priv->node);
}
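The conversion above swaps the struct opp_table * handle for an integer token:
dev_pm_opp_set_regulators() now takes a NULL-terminated name array, returns a
positive token (or a negative errno), and the matching put takes that token.
The resulting call pattern, condensed from the diff ("cpu0" is a placeholder
supply name):

    const char *reg_name[] = { "cpu0", NULL };  /* NULL-terminated, no count */
    int token = dev_pm_opp_set_regulators(cpu_dev, reg_name);

    if (token < 0)
            return token;                       /* may be -EPROBE_DEFER */
    /* ... */
    dev_pm_opp_put_regulators(token);           /* token, not a table pointer */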
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 954eef26685f..69b3d61852ac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
target_freq = clamp_val(target_freq, policy->min, policy->max);
- if (!cpufreq_driver->target_index)
+ if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
@@ -1348,15 +1348,15 @@ static int cpufreq_online(unsigned int cpu)
}
if (!new_policy && cpufreq_driver->online) {
+ /* Recover policy->cpus using related_cpus */
+ cpumask_copy(policy->cpus, policy->related_cpus);
+
ret = cpufreq_driver->online(policy);
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
goto out_exit_policy;
}
-
- /* Recover policy->cpus using related_cpus */
- cpumask_copy(policy->cpus, policy->related_cpus);
} else {
cpumask_copy(policy->cpus, cpumask_of(cpu));
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 3fe9125156b4..76e553af2071 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -31,8 +31,8 @@
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
-static struct opp_table *cpufreq_opp_table;
static struct device *cpu_dev;
+static int cpufreq_opp_token;
enum IMX7ULP_CPUFREQ_CLKS {
ARM,
@@ -153,9 +153,9 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "cpu speed grade %d mkt segment %d supported-hw %#x %#x\n",
speed_grade, mkt_segment, supported_hw[0], supported_hw[1]);
- cpufreq_opp_table = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
- if (IS_ERR(cpufreq_opp_table)) {
- ret = PTR_ERR(cpufreq_opp_table);
+ cpufreq_opp_token = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
+ if (cpufreq_opp_token < 0) {
+ ret = cpufreq_opp_token;
dev_err(&pdev->dev, "Failed to set supported opp: %d\n", ret);
return ret;
}
@@ -163,7 +163,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_data(
&pdev->dev, "cpufreq-dt", -1, NULL, 0);
if (IS_ERR(cpufreq_dt_pdev)) {
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
return ret;
@@ -176,7 +176,7 @@ static int imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
if (!of_machine_is_compatible("fsl,imx7ulp"))
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
else
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 76f6b3884e6b..7f2680bc9a0f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -478,6 +478,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (info->soc_data->ccifreq_supported) {
info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
if (info->vproc_on_boot < 0) {
+ ret = info->vproc_on_boot;
dev_err(info->cpu_dev,
"invalid Vproc value: %d\n", info->vproc_on_boot);
goto out_disable_inter_clock;
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 36c79580fba2..d5ef3c66c762 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -15,6 +15,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/units.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -26,8 +27,6 @@
#define GT_IRQ_STATUS BIT(2)
-#define HZ_PER_KHZ 1000
-
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_domain_state;
@@ -428,7 +427,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -445,7 +444,11 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
if (data->throttle_irq <= 0)
return 0;
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = false;
+ mutex_unlock(&data->throttle_lock);
+
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -465,7 +468,8 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
- irq_set_affinity_hint(data->throttle_irq, NULL);
+ irq_set_affinity_and_hint(data->throttle_irq, NULL);
+ disable_irq_nosync(data->throttle_irq);
return 0;
}
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 6dfa86971a75..863548f59c3e 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -55,9 +55,7 @@ struct qcom_cpufreq_match_data {
};
struct qcom_cpufreq_drv {
- struct opp_table **names_opp_tables;
- struct opp_table **hw_opp_tables;
- struct opp_table **genpd_opp_tables;
+ int *opp_tokens;
u32 versions;
const struct qcom_cpufreq_match_data *data;
};
@@ -315,72 +313,43 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
- drv->names_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->names_opp_tables),
+ drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
GFP_KERNEL);
- if (!drv->names_opp_tables) {
+ if (!drv->opp_tokens) {
ret = -ENOMEM;
goto free_drv;
}
- drv->hw_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->hw_opp_tables),
- GFP_KERNEL);
- if (!drv->hw_opp_tables) {
- ret = -ENOMEM;
- goto free_opp_names;
- }
-
- drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->genpd_opp_tables),
- GFP_KERNEL);
- if (!drv->genpd_opp_tables) {
- ret = -ENOMEM;
- goto free_opp;
- }
for_each_possible_cpu(cpu) {
+ struct dev_pm_opp_config config = {
+ .supported_hw = NULL,
+ };
+
cpu_dev = get_cpu_device(cpu);
if (NULL == cpu_dev) {
ret = -ENODEV;
- goto free_genpd_opp;
+ goto free_opp;
}
if (drv->data->get_version) {
+ config.supported_hw = &drv->versions;
+ config.supported_hw_count = 1;
- if (pvs_name) {
- drv->names_opp_tables[cpu] = dev_pm_opp_set_prop_name(
- cpu_dev,
- pvs_name);
- if (IS_ERR(drv->names_opp_tables[cpu])) {
- ret = PTR_ERR(drv->names_opp_tables[cpu]);
- dev_err(cpu_dev, "Failed to add OPP name %s\n",
- pvs_name);
- goto free_opp;
- }
- }
-
- drv->hw_opp_tables[cpu] = dev_pm_opp_set_supported_hw(
- cpu_dev, &drv->versions, 1);
- if (IS_ERR(drv->hw_opp_tables[cpu])) {
- ret = PTR_ERR(drv->hw_opp_tables[cpu]);
- dev_err(cpu_dev,
- "Failed to set supported hardware\n");
- goto free_genpd_opp;
- }
+ if (pvs_name)
+ config.prop_name = pvs_name;
}
if (drv->data->genpd_names) {
- drv->genpd_opp_tables[cpu] =
- dev_pm_opp_attach_genpd(cpu_dev,
- drv->data->genpd_names,
- NULL);
- if (IS_ERR(drv->genpd_opp_tables[cpu])) {
- ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
- if (ret != -EPROBE_DEFER)
- dev_err(cpu_dev,
- "Could not attach to pm_domain: %d\n",
- ret);
- goto free_genpd_opp;
+ config.genpd_names = drv->data->genpd_names;
+ config.virt_devs = NULL;
+ }
+
+ if (config.supported_hw || config.genpd_names) {
+ drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
+ if (drv->opp_tokens[cpu] < 0) {
+ ret = drv->opp_tokens[cpu];
+ dev_err(cpu_dev, "Failed to set OPP config\n");
+ goto free_opp;
}
}
}
@@ -395,27 +364,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(cpu_dev, "Failed to register platform device\n");
-free_genpd_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->genpd_opp_tables[cpu]))
- break;
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
- kfree(drv->genpd_opp_tables);
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->names_opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
- }
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->hw_opp_tables[cpu]))
- break;
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- }
- kfree(drv->hw_opp_tables);
-free_opp_names:
- kfree(drv->names_opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+ kfree(drv->opp_tokens);
free_drv:
kfree(drv);
@@ -429,15 +381,10 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
- dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
- kfree(drv->names_opp_tables);
- kfree(drv->hw_opp_tables);
- kfree(drv->genpd_opp_tables);
+ kfree(drv->opp_tokens);
kfree(drv);
return 0;
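Here the per-property helpers (dev_pm_opp_set_prop_name(), set_supported_hw(),
attach_genpd()) collapse into a single dev_pm_opp_set_config() call whose
options live in a struct dev_pm_opp_config, and one token unwinds them all.
Condensed from the probe path above:

    struct dev_pm_opp_config config = {
            .supported_hw = &versions,          /* u32 bitmask from fuses */
            .supported_hw_count = 1,
            .prop_name = pvs_name,              /* optional */
    };
    int token = dev_pm_opp_set_config(cpu_dev, &config);

    if (token < 0)
            return token;
    /* ... */
    dev_pm_opp_clear_config(token);             /* undoes every option at once */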
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index fdb0a722d881..a67df90848c2 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -156,9 +156,13 @@ static int sti_cpufreq_set_opp_info(void)
unsigned int hw_info_offset;
unsigned int version[VERSION_ELEMENTS];
int pcode, substrate, major, minor;
- int ret;
+ int opp_token, ret;
char name[MAX_PCODE_NAME_LEN];
- struct opp_table *opp_table;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ .prop_name = name,
+ };
reg_fields = sti_cpufreq_match();
if (!reg_fields) {
@@ -210,21 +214,14 @@ use_defaults:
snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode);
- opp_table = dev_pm_opp_set_prop_name(dev, name);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set prop name\n");
- return PTR_ERR(opp_table);
- }
-
version[0] = BIT(major);
version[1] = BIT(minor);
version[2] = BIT(substrate);
- opp_table = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set supported hardware\n");
- ret = PTR_ERR(opp_table);
- goto err_put_prop_name;
+ opp_token = dev_pm_opp_set_config(dev, &config);
+ if (opp_token < 0) {
+ dev_err(dev, "Failed to set OPP config\n");
+ return opp_token;
}
dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n",
@@ -233,10 +230,6 @@ use_defaults:
version[0], version[1], version[2]);
return 0;
-
-err_put_prop_name:
- dev_pm_opp_put_prop_name(opp_table);
- return ret;
}
static int sti_cpufreq_fetch_syscon_registers(void)
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 75e1bf3a08f7..a4922580ce06 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -86,20 +86,20 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
- struct opp_table **opp_tables;
+ int *opp_tokens;
char name[MAX_NAME_LEN];
unsigned int cpu;
u32 speed = 0;
int ret;
- opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
+ opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
GFP_KERNEL);
- if (!opp_tables)
+ if (!opp_tokens)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
if (ret) {
- kfree(opp_tables);
+ kfree(opp_tokens);
return ret;
}
@@ -113,9 +113,9 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (IS_ERR(opp_tables[cpu])) {
- ret = PTR_ERR(opp_tables[cpu]);
+ opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
+ if (opp_tokens[cpu] < 0) {
+ ret = opp_tokens[cpu];
pr_err("Failed to set prop name\n");
goto free_opp;
}
@@ -124,7 +124,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
if (!IS_ERR(cpufreq_dt_pdev)) {
- platform_set_drvdata(pdev, opp_tables);
+ platform_set_drvdata(pdev, opp_tokens);
return 0;
}
@@ -132,27 +132,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
- }
- kfree(opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ kfree(opp_tokens);
return ret;
}
static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
{
- struct opp_table **opp_tables = platform_get_drvdata(pdev);
+ int *opp_tokens = platform_get_drvdata(pdev);
unsigned int cpu;
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
- kfree(opp_tables);
+ kfree(opp_tokens);
return 0;
}
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 2a6a98764a8c..1216046cf4c2 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -162,7 +162,7 @@ static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
.set_cpu_ndiv = tegra234_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
@@ -430,7 +430,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
.set_cpu_ndiv = tegra194_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
};
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index e8db3d75be25..ab7ac7df9e62 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -32,9 +32,9 @@ static bool cpu0_node_has_opp_v2_prop(void)
return ret;
}
-static void tegra20_cpufreq_put_supported_hw(void *opp_table)
+static void tegra20_cpufreq_put_supported_hw(void *opp_token)
{
- dev_pm_opp_put_supported_hw(opp_table);
+ dev_pm_opp_put_supported_hw((unsigned long) opp_token);
}
static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
@@ -45,7 +45,6 @@ static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
- struct opp_table *opp_table;
struct device *cpu_dev;
u32 versions[2];
int err;
@@ -71,16 +70,15 @@ static int tegra20_cpufreq_probe(struct platform_device *pdev)
if (WARN_ON(!cpu_dev))
return -ENODEV;
- opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
- err = PTR_ERR_OR_ZERO(opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ if (err < 0) {
dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
return err;
}
err = devm_add_action_or_reset(&pdev->dev,
tegra20_cpufreq_put_supported_hw,
- opp_table);
+ (void *)((unsigned long) err));
if (err)
return err;
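Since devm actions only pass a void * cookie, the integer token is
round-tripped through unsigned long; the intermediate cast avoids
int-to-pointer-size warnings on 64-bit builds. The two ends of the trip, as
used above:

    /* registration: the non-negative token is packed into the cookie */
    devm_add_action_or_reset(&pdev->dev, tegra20_cpufreq_put_supported_hw,
                             (void *)((unsigned long) err));

    /* teardown: the cookie is unpacked back into the token */
    dev_pm_opp_put_supported_hw((unsigned long) opp_token);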
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 8f9fdd864391..df85a77d476b 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -60,7 +60,6 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
- struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -173,7 +172,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
* seems to always read as 0).
*/
-static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+static const char * const omap3_reg_names[] = {"cpu0", "vbb", NULL};
static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.reg_names = omap3_reg_names,
@@ -324,10 +323,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
const struct of_device_id *match;
- struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
- const char * const default_reg_names[] = {"vdd", "vbb"};
+ const char * const default_reg_names[] = {"vdd", "vbb", NULL};
int ret;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ };
match = dev_get_platdata(&pdev->dev);
if (!match)
@@ -370,33 +372,21 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto fail_put_node;
- ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT);
- if (IS_ERR(ti_opp_table)) {
- dev_err(opp_data->cpu_dev,
- "Failed to set supported hardware\n");
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
-
- opp_data->opp_table = ti_opp_table;
-
if (opp_data->soc_data->multi_regulator) {
- const char * const *reg_names = default_reg_names;
-
if (opp_data->soc_data->reg_names)
- reg_names = opp_data->soc_data->reg_names;
- ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
- reg_names,
- ARRAY_SIZE(default_reg_names));
- if (IS_ERR(ti_opp_table)) {
- dev_pm_opp_put_supported_hw(opp_data->opp_table);
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
+ config.regulator_names = opp_data->soc_data->reg_names;
+ else
+ config.regulator_names = default_reg_names;
+ }
+
+ ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
+ if (ret < 0) {
+ dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ goto fail_put_node;
}
of_node_put(opp_data->opp_node);
+
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 62dd956025f3..6eceb1988243 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -8,6 +8,7 @@
* This code is licenced under the GPL.
*/
+#include "linux/percpu-defs.h"
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -279,6 +280,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* Shallower states are enabled, so update. */
dev->states_usage[entered_state].above++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, false);
break;
}
} else if (diff > delay) {
@@ -290,8 +292,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
- if (diff - delay >= drv->states[i].target_residency_ns)
+ if (diff - delay >= drv->states[i].target_residency_ns) {
dev->states_usage[entered_state].below++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, true);
+ }
break;
}
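A worked example of the miss accounting above (numbers invented): if the
governor picked a state with a 400us target residency but the CPU idled for
only 150us while a 100us-residency state was enabled, the chosen state was too
deep, so states_usage[entered_state].above++ and
trace_cpu_idle_miss(cpu, entered_state, false) fire; conversely, idling 5ms in
a shallow state when a deeper 400us-residency state was available bumps
.below++ and fires the tracepoint with true.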
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index f64e3984689f..768ced3d6fe8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -2,6 +2,7 @@
menuconfig CXL_BUS
tristate "CXL (Compute Express Link) Devices Support"
depends on PCI
+ select PCI_DOE
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -102,4 +103,12 @@ config CXL_SUSPEND
def_bool y
depends on SUSPEND && CXL_MEM
+config CXL_REGION
+ bool
+ default CXL_BUS
+ # For MAX_PHYSMEM_BITS
+ depends on SPARSEMEM
+ select MEMREGION
+ select GET_FREE_REGION
+
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 40286f5df812..fb649683dd3a 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -9,10 +9,6 @@
#include "cxlpci.h"
#include "cxl.h"
-/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
-#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
-#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
-
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
@@ -34,7 +30,8 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
static int cxl_acpi_cfmws_verify(struct device *dev,
struct acpi_cedt_cfmws *cfmws)
{
- int expected_len;
+ int rc, expected_len;
+ unsigned int ways;
if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
@@ -51,14 +48,14 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return -EINVAL;
}
- if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
- dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc) {
+ dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
+ cfmws->interleave_ways);
return -EINVAL;
}
- expected_len = struct_size((cfmws), interleave_targets,
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ expected_len = struct_size(cfmws, interleave_targets, ways);
if (cfmws->header.length < expected_len) {
dev_err(dev, "CFMWS length %d less than expected %d\n",
@@ -76,6 +73,8 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
struct cxl_cfmws_context {
struct device *dev;
struct cxl_port *root_port;
+ struct resource *cxl_res;
+ int id;
};
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -84,10 +83,14 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
+ struct resource *cxl_res = ctx->cxl_res;
+ struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld;
- int rc, i;
+ unsigned int ways, i, ig;
+ struct resource *res;
+ int rc;
cfmws = (struct acpi_cedt_cfmws *) header;
@@ -99,19 +102,51 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
return 0;
}
- for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc)
+ return rc;
+ rc = cxl_to_granularity(cfmws->granularity, &ig);
+ if (rc)
+ return rc;
+ for (i = 0; i < ways; i++)
target_map[i] = cfmws->interleave_targets[i];
- cxld = cxl_root_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
- if (IS_ERR(cxld))
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
+ if (!res->name)
+ goto err_name;
+
+ res->start = cfmws->base_hpa;
+ res->end = cfmws->base_hpa + cfmws->window_size - 1;
+ res->flags = IORESOURCE_MEM;
+
+ /* add to the local resource tracking to establish a sort order */
+ rc = insert_resource(cxl_res, res);
+ if (rc)
+ goto err_insert;
+
+ cxlrd = cxl_root_decoder_alloc(root_port, ways);
+ if (IS_ERR(cxlrd))
return 0;
+ cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(cfmws->base_hpa,
- cfmws->window_size);
- cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
- cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
+ cxld->hpa_range = (struct range) {
+ .start = res->start,
+ .end = res->end,
+ };
+ cxld->interleave_ways = ways;
+ /*
+ * Minimize the x1 granularity to advertise support for any
+ * valid region granularity
+ */
+ if (ways == 1)
+ ig = CXL_DECODER_MIN_GRANULARITY;
+ cxld->interleave_granularity = ig;
rc = cxl_decoder_add(cxld, target_map);
if (rc)
@@ -119,15 +154,22 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
else
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
- dev_err(dev, "Failed to add decoder for %pr\n",
- &cxld->platform_res);
+ dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
}
- dev_dbg(dev, "add: %s node: %d range %pr\n", dev_name(&cxld->dev),
- phys_to_target_node(cxld->platform_res.start),
- &cxld->platform_res);
+ dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
+ dev_name(&cxld->dev),
+ phys_to_target_node(cxld->hpa_range.start),
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
+
+err_insert:
+ kfree(res->name);
+err_name:
+ kfree(res);
+ return -ENOMEM;
}
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
@@ -175,8 +217,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (rc)
return rc;
- port = devm_cxl_add_port(host, match, dport->component_reg_phys,
- root_port);
+ port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
if (IS_ERR(port))
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
@@ -282,9 +323,127 @@ static void cxl_acpi_lock_reset_class(void *dev)
device_lock_reset_class(dev);
}
+static void del_cxl_resource(struct resource *res)
+{
+ kfree(res->name);
+ kfree(res);
+}
+
+static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
+{
+ priv->desc = (unsigned long) pub;
+}
+
+static struct resource *cxl_get_public_resource(struct resource *priv)
+{
+ return (struct resource *) priv->desc;
+}
+
+static void remove_cxl_resources(void *data)
+{
+ struct resource *res, *next, *cxl = data;
+
+ for (res = cxl->child; res; res = next) {
+ struct resource *victim = cxl_get_public_resource(res);
+
+ next = res->sibling;
+ remove_resource(res);
+
+ if (victim) {
+ remove_resource(victim);
+ kfree(victim);
+ }
+
+ del_cxl_resource(res);
+ }
+}
+
+/**
+ * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
+ * @cxl_res: A standalone resource tree where each CXL window is a sibling
+ *
+ * Walk each CXL window in @cxl_res and add it to iomem_resource, potentially
+ * expanding its boundaries to ensure that any conflicting resources become
+ * children. If a window is expanded it may then conflict with another window
+ * entry and require the window to be truncated or trimmed. Consider this
+ * situation:
+ *
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
+ *
+ * ...where platform firmware has established a System RAM resource across 2
+ * windows, but has left some portion of window 1 for dynamic CXL region
+ * provisioning. In this case "Window 0" will span the entirety of the "System
+ * RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
+ * of that "System RAM" resource.
+ */
+static int add_cxl_resources(struct resource *cxl_res)
+{
+ struct resource *res, *new, *next;
+
+ for (res = cxl_res->child; res; res = next) {
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->name = res->name;
+ new->start = res->start;
+ new->end = res->end;
+ new->flags = IORESOURCE_MEM;
+ new->desc = IORES_DESC_CXL;
+
+ /*
+ * Record the public resource in the private cxl_res tree for
+ * later removal.
+ */
+ cxl_set_public_resource(res, new);
+
+ insert_resource_expand_to_fit(&iomem_resource, new);
+
+ next = res->sibling;
+ while (next && resource_overlaps(new, next)) {
+ if (resource_contains(new, next)) {
+ struct resource *_next = next->sibling;
+
+ remove_resource(next);
+ del_cxl_resource(next);
+ next = _next;
+ } else
+ next->start = new->end + 1;
+ }
+ }
+ return 0;
+}
+
+static int pair_cxl_resource(struct device *dev, void *data)
+{
+ struct resource *cxl_res = data;
+ struct resource *p;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ for (p = cxl_res->child; p; p = p->sibling) {
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct resource res = {
+ .start = cxld->hpa_range.start,
+ .end = cxld->hpa_range.end,
+ .flags = IORESOURCE_MEM,
+ };
+
+ if (resource_contains(p, &res)) {
+ cxlrd->res = cxl_get_public_resource(p);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
+ struct resource *cxl_res;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
@@ -296,6 +455,14 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc)
return rc;
+ cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
+ if (!cxl_res)
+ return -ENOMEM;
+ cxl_res->name = "CXL mem";
+ cxl_res->start = 0;
+ cxl_res->end = -1;
+ cxl_res->flags = IORESOURCE_MEM;
+
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
@@ -306,11 +473,28 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc < 0)
return rc;
+ rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
+ if (rc)
+ return rc;
+
ctx = (struct cxl_cfmws_context) {
.dev = host,
.root_port = root_port,
+ .cxl_res = cxl_res,
};
- acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ if (rc < 0)
+ return -ENXIO;
+
+ rc = add_cxl_resources(cxl_res);
+ if (rc)
+ return rc;
+
+ /*
+ * Populate the root decoders with their related iomem resource,
+ * if present
+ */
+ device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);
/*
* Root level scanned with host-bridge as dports, now scan host-bridges
@@ -337,12 +521,19 @@ static const struct acpi_device_id cxl_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
+static const struct platform_device_id cxl_test_ids[] = {
+ { "cxl_acpi" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, cxl_test_ids);
+
static struct platform_driver cxl_acpi_driver = {
.probe = cxl_acpi_probe,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = cxl_acpi_ids,
},
+ .id_table = cxl_test_ids,
};
module_platform_driver(cxl_acpi_driver);
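A hypothetical numeric walk-through of the expand-and-trim logic in
add_cxl_resources() (addresses invented): with "CXL Window 0" at
[0x100000000, 0x1ffffffff], "CXL Window 1" at [0x200000000, 0x3ffffffff], and
"System RAM" spanning [0x100000000, 0x2ffffffff],
insert_resource_expand_to_fit() grows Window 0 to cover the full RAM span; the
sibling walk then sees Window 1 overlapping but not contained, so its start is
bumped to new->end + 1, leaving [0x300000000, 0x3ffffffff] available for
dynamic region provisioning.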
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 9d35085d25af..79c7257f4107 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -10,3 +10,4 @@ cxl_core-y += memdev.o
cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
+cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 1a50c0fc399c..1d8f87be283f 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -9,6 +9,36 @@ extern const struct device_type cxl_nvdimm_type;
extern struct attribute_group cxl_base_attribute_group;
+#ifdef CONFIG_CXL_REGION
+extern struct device_attribute dev_attr_create_pmem_region;
+extern struct device_attribute dev_attr_delete_region;
+extern struct device_attribute dev_attr_region;
+extern const struct device_type cxl_pmem_region_type;
+extern const struct device_type cxl_region_type;
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
+#define CXL_REGION_TYPE(x) (&cxl_region_type)
+#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
+#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
+int cxl_region_init(void);
+void cxl_region_exit(void);
+#else
+static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+}
+static inline int cxl_region_init(void)
+{
+ return 0;
+}
+static inline void cxl_region_exit(void)
+{
+}
+#define CXL_REGION_ATTR(x) NULL
+#define CXL_REGION_TYPE(x) NULL
+#define SET_CXL_REGION_ATTR(x)
+#define CXL_PMEM_REGION_TYPE(x) NULL
+#endif
+
struct cxl_send_command;
struct cxl_mem_query_commands;
int cxl_query_cmd(struct cxl_memdev *cxlmd,
@@ -17,9 +47,28 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+extern struct rw_semaphore cxl_dpa_rwsem;
+
+bool is_switch_decoder(struct device *dev);
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
+static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
+ struct cxl_memdev *cxlmd)
+{
+ if (!port)
+ return NULL;
+
+ return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
+}
+
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);
-void cxl_mbox_exit(void);
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index bfc8ee876278..d1d2caea5c62 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -16,6 +17,8 @@
* for enumerating these registers and capabilities.
*/
+DECLARE_RWSEM(cxl_dpa_rwsem);
+
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map)
{
@@ -46,20 +49,22 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
*/
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
- struct cxl_decoder *cxld;
- struct cxl_dport *dport;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_dport *dport = NULL;
int single_port_map[1];
+ unsigned long index;
- cxld = cxl_switch_decoder_alloc(port, 1);
- if (IS_ERR(cxld))
- return PTR_ERR(cxld);
+ cxlsd = cxl_switch_decoder_alloc(port, 1);
+ if (IS_ERR(cxlsd))
+ return PTR_ERR(cxlsd);
device_lock_assert(&port->dev);
- dport = list_first_entry(&port->dports, typeof(*dport), list);
+ xa_for_each(&port->dports, index, dport)
+ break;
single_port_map[0] = dport->port_id;
- return add_hdm_decoder(port, cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
@@ -124,47 +129,577 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
return ERR_PTR(-ENXIO);
}
+ dev_set_drvdata(dev, cxlhdm);
+
return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
-static int to_interleave_granularity(u32 ctrl)
+static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
+{
+ unsigned long long start = r->start, end = r->end;
+
+ seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
+ r->name);
+}
+
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
+{
+ struct resource *p1, *p2;
+
+ down_read(&cxl_dpa_rwsem);
+ for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
+ __cxl_dpa_debug(file, p1, 0);
+ for (p2 = p1->child; p2; p2 = p2->sibling)
+ __cxl_dpa_debug(file, p2, 1);
+ }
+ up_read(&cxl_dpa_rwsem);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
+
+/*
+ * Must be called in a context that synchronizes against this decoder's
+ * port ->remove() callback (like an endpoint decoder sysfs attribute)
+ */
+static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct resource *res = cxled->dpa_res;
+ resource_size_t skip_start;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ /* save @skip_start, before @res is released */
+ skip_start = res->start - cxled->skip;
+ __release_region(&cxlds->dpa_res, res->start, resource_size(res));
+ if (cxled->skip)
+ __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ cxled->skip = 0;
+ cxled->dpa_res = NULL;
+ put_device(&cxled->cxld.dev);
+ port->hdm_end--;
+}
+
+static void cxl_dpa_release(void *cxled)
+{
+ down_write(&cxl_dpa_rwsem);
+ __cxl_dpa_release(cxled);
+ up_write(&cxl_dpa_rwsem);
+}
+
+/*
+ * Must be called from context that will not race port device
+ * unregistration, like decoder sysfs attribute methods
+ */
+static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+ devm_remove_action(&port->dev, cxl_dpa_release, cxled);
+ __cxl_dpa_release(cxled);
+}
+
+static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &port->dev;
+ struct resource *res;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ if (!len)
+ goto success;
+
+ if (cxled->dpa_res) {
+ dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
+ return -EBUSY;
+ }
+
+ if (port->hdm_end + 1 != cxled->cxld.id) {
+ /*
+ * Assumes alloc and commit order is always in hardware instance
+ * order per expectations from 8.2.5.12.20 Committing Decoder
+ * Programming, which requires decoder[m] to be committed
+ * before decoder[m+1] commit starts.
+ */
+ dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
+ cxled->cxld.id, port->id, port->hdm_end + 1);
+ return -EBUSY;
+ }
+
+ if (skipped) {
+ res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev,
+ "decoder%d.%d: failed to reserve skipped space\n",
+ port->id, cxled->cxld.id);
+ return -EBUSY;
+ }
+ }
+ res = __request_region(&cxlds->dpa_res, base, len,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
+ port->id, cxled->cxld.id);
+ if (skipped)
+ __release_region(&cxlds->dpa_res, base - skipped,
+ skipped);
+ return -EBUSY;
+ }
+ cxled->dpa_res = res;
+ cxled->skip = skipped;
+
+ if (resource_contains(&cxlds->pmem_res, res))
+ cxled->mode = CXL_DECODER_PMEM;
+ else if (resource_contains(&cxlds->ram_res, res))
+ cxled->mode = CXL_DECODER_RAM;
+ else {
+ dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
+ cxled->cxld.id, cxled->dpa_res);
+ cxled->mode = CXL_DECODER_MIXED;
+ }
+
+success:
+ port->hdm_end++;
+ get_device(&cxled->cxld.dev);
+ return 0;
+}
+
+static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl);
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
- return 256 << val;
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
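
__cxl_dpa_reserve() claims the skipped hole before the allocation itself, and unwinds the skip if the second reservation fails. A compilable sketch of that ordering and rollback, with stub helpers standing in for __request_region()/__release_region() (the stubs always succeed here; only the unwind order is the point):

    #include <stdbool.h>
    #include <stdio.h>

    static bool request_region(unsigned long long base,
                               unsigned long long len, const char *name)
    {
        printf("reserve  %#llx-%#llx (%s)\n", base, base + len - 1, name);
        return true;
    }

    static void release_region(unsigned long long base,
                               unsigned long long len)
    {
        printf("release  %#llx-%#llx\n", base, base + len - 1);
    }

    /* mirror of the reserve path: claim the skipped hole first, then
     * the allocation; release the skip if the allocation fails */
    static int dpa_reserve(unsigned long long base, unsigned long long len,
                           unsigned long long skipped)
    {
        if (skipped && !request_region(base - skipped, skipped, "skip"))
            return -1;
        if (!request_region(base, len, "alloc")) {
            if (skipped)
                release_region(base - skipped, skipped);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return dpa_reserve(0x40000000, 0x10000000, 0x40000000);
    }
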
-static int to_interleave_ways(u32 ctrl)
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl);
+ resource_size_t size = 0;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ size = resource_size(cxled->dpa_res);
+ up_read(&cxl_dpa_rwsem);
- switch (val) {
- case 0 ... 4:
- return 1 << val;
- case 8 ... 10:
- return 3 << (val - 8);
+ return size;
+}
+
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
+{
+ resource_size_t base = -1;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ base = cxled->dpa_res->start;
+ up_read(&cxl_dpa_rwsem);
+
+ return base;
+}
+
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (!cxled->dpa_res) {
+ rc = 0;
+ goto out;
+ }
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder assigned to: %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.id != port->hdm_end) {
+ dev_dbg(dev, "expected decoder%d.%d\n", port->id,
+ port->hdm_end);
+ rc = -EBUSY;
+ goto out;
+ }
+ devm_cxl_dpa_release(cxled);
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+ return rc;
+}
+
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
default:
+ dev_dbg(dev, "unsupported mode: %d\n", mode);
+ return -EINVAL;
+ }
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Only allow modes that are supported by the current partition
+ * configuration
+ */
+ if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
+ dev_dbg(dev, "no available pmem capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
+ dev_dbg(dev, "no available ram capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled->mode = mode;
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ return rc;
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ resource_size_t free_ram_start, free_pmem_start;
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ resource_size_t start, avail, skip;
+ struct resource *p, *last;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder attached to %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_ram_start = last->end + 1;
+ else
+ free_ram_start = cxlds->ram_res.start;
+
+ for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_pmem_start = last->end + 1;
+ else
+ free_pmem_start = cxlds->pmem_res.start;
+
+ if (cxled->mode == CXL_DECODER_RAM) {
+ start = free_ram_start;
+ avail = cxlds->ram_res.end - start + 1;
+ skip = 0;
+ } else if (cxled->mode == CXL_DECODER_PMEM) {
+ resource_size_t skip_start, skip_end;
+
+ start = free_pmem_start;
+ avail = cxlds->pmem_res.end - start + 1;
+ skip_start = free_ram_start;
+
+ /*
+ * If some pmem is already allocated, then that allocation
+ * already handled the skip.
+ */
+ if (cxlds->pmem_res.child &&
+ skip_start == cxlds->pmem_res.child->start)
+ skip_end = skip_start - 1;
+ else
+ skip_end = start - 1;
+ skip = skip_end - skip_start + 1;
+ } else {
+ dev_dbg(dev, "mode not set\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (size > avail) {
+ dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
+ cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
+ &avail);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ rc = __cxl_dpa_reserve(cxled, start, size, skip);
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
+
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
+}
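
The allocation arithmetic above is easiest to see with concrete numbers. A sketch for a hypothetical device with 1GiB of ram followed by 1GiB of pmem and no prior allocations: a first pmem allocation must skip the entire still-free ram range so that DPA is handed out in device order:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical 1GiB ram + 1GiB pmem device, nothing allocated */
        unsigned long long ram_start = 0, pmem_start = 0x40000000ULL;
        unsigned long long pmem_end = 0x7fffffffULL;

        unsigned long long free_ram_start = ram_start;   /* no ram children */
        unsigned long long free_pmem_start = pmem_start; /* no pmem children */

        /* pmem allocation: available capacity and the ram hole to skip */
        unsigned long long start = free_pmem_start;
        unsigned long long avail = pmem_end - start + 1;
        unsigned long long skip_start = free_ram_start;
        unsigned long long skip_end = start - 1; /* no prior pmem child */
        unsigned long long skip = skip_end - skip_start + 1;

        printf("start=%#llx avail=%#llx skip=%#llx\n", start, avail, skip);
        /* start=0x40000000 avail=0x40000000 skip=0x40000000 */
        return 0;
    }
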
+
+static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u16 eig;
+ u8 eiw;
+
+ /*
+ * Input validation ensures these warnings never fire, but otherwise
+ * suppress uninitialized variable usage warnings.
+ */
+ if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
+ "invalid interleave_ways: %d\n", cxld->interleave_ways))
+ return;
+ if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
+ "invalid interleave_granularity: %d\n",
+ cxld->interleave_granularity))
+ return;
+
+ u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
+ u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
+ *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
+}
+
+static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
+ CXL_HDM_DECODER0_CTRL_TYPE);
+}
+
+static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+{
+ struct cxl_dport **t = &cxlsd->target[0];
+ int ways = cxlsd->cxld.interleave_ways;
+
+ if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+ ways > 8 || ways > cxlsd->nr_targets,
+ "ways: %d overflows targets: %d\n", ways,
+ cxlsd->nr_targets))
+ return -ENXIO;
+
+ *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+ if (ways > 1)
+ *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+ if (ways > 2)
+ *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
+ if (ways > 3)
+ *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
+ if (ways > 4)
+ *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
+ if (ways > 5)
+ *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
+ if (ways > 6)
+ *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+ if (ways > 7)
+ *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+
+ return 0;
+}
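
cxlsd_set_targets() packs one downstream port id per interleave way into the 64-bit target list register image, a byte per way starting at bits 7:0. The unrolled FIELD_PREP() chain above is equivalent to a simple shift loop, sketched here (the port ids are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* pack up to 8 port ids: way 0 in bits 7:0, way 1 in 15:8, ... */
    static uint64_t pack_targets(const uint8_t *port_id, int ways)
    {
        uint64_t tgt = 0;

        for (int i = 0; i < ways; i++)
            tgt |= (uint64_t)port_id[i] << (8 * i);
        return tgt;
    }

    int main(void)
    {
        uint8_t ids[4] = { 0, 1, 2, 3 }; /* hypothetical dport ids */

        printf("%#llx\n", (unsigned long long)pack_targets(ids, 4));
        /* 0x3020100 */
        return 0;
    }
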
+
+/*
+ * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
+ * committed or error within 10ms; be generous and allow 20ms to account
+ * for clock skew and other marginal behavior.
+ */
+#define COMMIT_TIMEOUT_MS 20
+static int cxld_await_commit(void __iomem *hdm, int id)
+{
+ u32 ctrl;
+ int i;
+
+ for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ return -EIO;
+ }
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return 0;
+ fsleep(1000);
+ }
+
+ return -ETIMEDOUT;
+}
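
cxld_await_commit() is a bounded poll: read CTRL once per millisecond, fail fast on the error bit, succeed on the committed bit, and give up after the 20ms budget. A user-space sketch of the same loop shape; the bit positions mirror the driver's register definitions but the register read is faked:

    #include <stdio.h>
    #include <unistd.h>

    #define COMMIT_TIMEOUT_MS 20

    /* stand-in for the CTRL register read; "commits" on the 3rd poll */
    static unsigned int read_ctrl(void)
    {
        static int polls;

        return ++polls >= 3 ? 0x400 /* COMMITTED */ : 0;
    }

    static int await_commit(void)
    {
        for (int i = 0; i < COMMIT_TIMEOUT_MS; i++) {
            unsigned int ctrl = read_ctrl();

            if (ctrl & 0x800) /* COMMIT_ERROR */
                return -1;
            if (ctrl & 0x400) /* COMMITTED */
                return 0;
            usleep(1000); /* fsleep(1000) in the kernel: 1ms per poll */
        }
        return -2; /* timed out */
    }

    int main(void)
    {
        printf("await_commit: %d\n", await_commit());
        return 0;
    }
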
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+ u64 base, size;
+ u32 ctrl;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (port->commit_end + 1 != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ return -EBUSY;
+ }
+
+ down_read(&cxl_dpa_rwsem);
+ /* common decoder settings */
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+ cxld_set_interleave(cxld, &ctrl);
+ cxld_set_type(cxld, &ctrl);
+ base = cxld->hpa_range.start;
+ size = range_len(&cxld->hpa_range);
+
+ writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+ void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
+ void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+ u64 targets;
+
+ rc = cxlsd_set_targets(cxlsd, &targets);
+ if (rc) {
+ dev_dbg(&port->dev, "%s: target configuration error\n",
+ dev_name(&cxld->dev));
+ goto err;
+ }
+
+ writel(upper_32_bits(targets), tl_hi);
+ writel(lower_32_bits(targets), tl_lo);
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
+ void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
+
+ writel(upper_32_bits(cxled->skip), sk_hi);
+ writel(lower_32_bits(cxled->skip), sk_lo);
+ }
+
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end++;
+ rc = cxld_await_commit(hdm, cxld->id);
+err:
+ if (rc) {
+ dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+ dev_name(&cxld->dev), rc);
+ cxld->reset(cxld);
+ return rc;
+ }
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
+static int cxl_decoder_reset(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id;
+ u32 ctrl;
+
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return 0;
+
+ if (port->commit_end != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order reset, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end);
+ return -EBUSY;
}
+
+ down_read(&cxl_dpa_rwsem);
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end--;
+ cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which)
+ int *target_map, void __iomem *hdm, int which,
+ u64 *dpa_base)
{
- u64 size, base;
+ struct cxl_endpoint_decoder *cxled = NULL;
+ u64 size, base, skip, dpa_size;
+ bool committed;
+ u32 remainder;
+ int i, rc;
u32 ctrl;
- int i;
union {
u64 value;
unsigned char target_id[8];
} target_list;
+ if (is_endpoint_decoder(&cxld->dev))
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
+ cxld->commit = cxl_decoder_commit;
+ cxld->reset = cxl_decoder_reset;
- if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
+ if (!committed)
size = 0;
if (base == U64_MAX || size == U64_MAX) {
dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
@@ -172,39 +707,77 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
- cxld->decoder_range = (struct range) {
+ cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
};
- /* switch decoders are always enabled if committed */
- if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
+ /* decoders are enabled if committed */
+ if (committed) {
cxld->flags |= CXL_DECODER_F_ENABLE;
if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
cxld->flags |= CXL_DECODER_F_LOCK;
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ else
+ cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (cxld->id != port->commit_end + 1) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed out of order\n",
+ port->id, cxld->id);
+ return -ENXIO;
+ }
+ port->commit_end = cxld->id;
+ } else {
+ /* unless/until type-2 drivers arrive, assume type-3 */
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
+ ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
+ }
+ cxld->target_type = CXL_DECODER_EXPANDER;
}
- cxld->interleave_ways = to_interleave_ways(ctrl);
- if (!cxld->interleave_ways) {
+ rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
+ &cxld->interleave_ways);
+ if (rc) {
dev_warn(&port->dev,
"decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
port->id, cxld->id, ctrl);
- return -ENXIO;
+ return rc;
}
- cxld->interleave_granularity = to_interleave_granularity(ctrl);
+ rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
+ &cxld->interleave_granularity);
+ if (rc)
+ return rc;
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
- cxld->target_type = CXL_DECODER_EXPANDER;
- else
- cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (!cxled) {
+ target_list.value =
+ ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ for (i = 0; i < cxld->interleave_ways; i++)
+ target_map[i] = target_list.target_id[i];
- if (is_endpoint_decoder(&cxld->dev))
return 0;
+ }
- target_list.value =
- ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
- for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ if (!committed)
+ return 0;
+ dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
+ if (remainder) {
+ dev_err(&port->dev,
+ "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
+ port->id, cxld->id, size, cxld->interleave_ways);
+ return -ENXIO;
+ }
+ skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+ port->id, cxld->id, *dpa_base,
+ *dpa_base + dpa_size + skip - 1, rc);
+ return rc;
+ }
+ *dpa_base += dpa_size + skip;
return 0;
}
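
The removed open-coded helpers and their cxl_to_ways()/cxl_to_granularity() replacements implement the CXL HDM decoder field encodings visible in the deleted switch above: eiw 0-4 means 2^eiw ways, eiw 8-10 means 3, 6, or 12 ways, and granularity is 256 << eig bytes. A sketch of those conversions, with the range validation the new helpers are assumed to perform:

    #include <stdio.h>

    static int eiw_to_ways(int eiw, unsigned int *ways)
    {
        if (eiw >= 0 && eiw <= 4)
            *ways = 1 << eiw;       /* 1, 2, 4, 8, 16 */
        else if (eiw >= 8 && eiw <= 10)
            *ways = 3 << (eiw - 8); /* 3, 6, 12 */
        else
            return -1;
        return 0;
    }

    static int eig_to_granularity(int eig, unsigned int *granularity)
    {
        if (eig < 0 || eig > 6)
            return -1;
        *granularity = 256 << eig;  /* 256B .. 16KiB */
        return 0;
    }

    int main(void)
    {
        unsigned int ways, gran;

        for (int eiw = 0; eiw <= 10; eiw++)
            if (!eiw_to_ways(eiw, &ways))
                printf("eiw=%d -> %u ways\n", eiw, ways);
        if (!eig_to_granularity(2, &gran))
            printf("eig=2 -> %u bytes\n", gran); /* 1024 */
        return 0;
    }
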
@@ -216,7 +789,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
- int i, committed, failed;
+ int i, committed;
+ u64 dpa_base = 0;
u32 ctrl;
/*
@@ -236,27 +810,37 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
if (committed != cxlhdm->decoder_count)
msleep(20);
- for (i = 0, failed = 0; i < cxlhdm->decoder_count; i++) {
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
- if (is_cxl_endpoint(port))
- cxld = cxl_endpoint_decoder_alloc(port);
- else
- cxld = cxl_switch_decoder_alloc(port, target_count);
- if (IS_ERR(cxld)) {
- dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
- return PTR_ERR(cxld);
+ if (is_cxl_endpoint(port)) {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = cxl_endpoint_decoder_alloc(port);
+ if (IS_ERR(cxled)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxled);
+ }
+ cxld = &cxled->cxld;
+ } else {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxlsd)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxlsd);
+ }
+ cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map,
- cxlhdm->regs.hdm_decoder, i);
+ rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
if (rc) {
put_device(&cxld->dev);
- failed++;
- continue;
+ return rc;
}
rc = add_hdm_decoder(port, cxld, target_map);
if (rc) {
@@ -266,11 +850,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
}
}
- if (failed == cxlhdm->decoder_count) {
- dev_err(&port->dev, "No valid decoders found\n");
- return -ENXIO;
- }
-
return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index cbf23beebebe..16176b9278b4 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -718,12 +718,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
*/
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
- struct cxl_mbox_get_partition_info {
- __le64 active_volatile_cap;
- __le64 active_persistent_cap;
- __le64 next_volatile_cap;
- __le64 next_persistent_cap;
- } __packed pi;
+ struct cxl_mbox_get_partition_info pi;
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
@@ -773,15 +768,6 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
cxlds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- dev_dbg(cxlds->dev,
- "Identify Memory Device\n"
- " total_bytes = %#llx\n"
- " volatile_only_bytes = %#llx\n"
- " persistent_only_bytes = %#llx\n"
- " partition_align_bytes = %#llx\n",
- cxlds->total_bytes, cxlds->volatile_only_bytes,
- cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
-
cxlds->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -789,42 +775,63 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
{
int rc;
- if (cxlds->partition_align_bytes == 0) {
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
- cxlds->pmem_range.start = cxlds->volatile_only_bytes;
- cxlds->pmem_range.end = cxlds->volatile_only_bytes +
- cxlds->persistent_only_bytes - 1;
+ res->name = type;
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = IORESOURCE_MEM;
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
return 0;
}
-
- rc = cxl_mem_get_partition_info(cxlds);
+ rc = request_resource(parent, res);
if (rc) {
- dev_err(cxlds->dev, "Failed to query partition information\n");
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
return rc;
}
- dev_dbg(cxlds->dev,
- "Get Partition Info\n"
- " active_volatile_bytes = %#llx\n"
- " active_persistent_bytes = %#llx\n"
- " next_volatile_bytes = %#llx\n"
- " next_persistent_bytes = %#llx\n",
- cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
- cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ int rc;
- cxlds->pmem_range.start = cxlds->active_volatile_bytes;
- cxlds->pmem_range.end =
- cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
+ cxlds->dpa_res =
+ (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
- return 0;
+ if (cxlds->partition_align_bytes == 0) {
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->volatile_only_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->volatile_only_bytes,
+ cxlds->persistent_only_bytes, "pmem");
+ }
+
+ rc = cxl_mem_get_partition_info(cxlds);
+ if (rc) {
+ dev_err(dev, "Failed to query partition information\n");
+ return rc;
+ }
+
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->active_volatile_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->active_volatile_bytes,
+ cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
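
add_dpa_res() lays the two partitions out back to back inside the device's total DPA span: ram always starts at DPA 0 and pmem begins where the volatile capacity ends. A worked sketch of that layout with hypothetical Identify / Get Partition Info values:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical device capacities */
        unsigned long long total = 0x80000000ULL; /* 2 GiB */
        unsigned long long volatile_bytes = 0x40000000ULL;
        unsigned long long persistent_bytes = 0x40000000ULL;

        /* the layout add_dpa_res() registers: ram at 0, pmem after */
        printf("dpa : %#llx-%#llx\n", 0ULL, total - 1);
        printf(" ram : %#llx-%#llx\n", 0ULL, volatile_bytes - 1);
        printf(" pmem: %#llx-%#llx\n", volatile_bytes,
               volatile_bytes + persistent_bytes - 1);
        return 0;
    }
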
@@ -845,19 +852,11 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
-static struct dentry *cxl_debugfs;
-
void __init cxl_mbox_init(void)
{
struct dentry *mbox_debugfs;
- cxl_debugfs = debugfs_create_dir("cxl", NULL);
- mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
+ mbox_debugfs = cxl_debugfs_create_dir("mbox");
debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
&cxl_raw_allow_all);
}
-
-void cxl_mbox_exit(void)
-{
- debugfs_remove_recursive(cxl_debugfs);
-}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index f7cdcd33504a..20ce488a7754 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -68,7 +68,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->ram_range);
+ unsigned long long len = resource_size(&cxlds->ram_res);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -81,7 +81,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->pmem_range);
+ unsigned long long len = resource_size(&cxlds->pmem_res);
return sysfs_emit(buf, "%#llx\n", len);
}
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c4c99ff7b55e..9240df53ed87 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
@@ -225,7 +226,6 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
{
struct range *dev_range = arg;
struct cxl_decoder *cxld;
- struct range root_range;
if (!is_root_decoder(dev))
return 0;
@@ -237,12 +237,7 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
if (!(cxld->flags & CXL_DECODER_F_RAM))
return 0;
- root_range = (struct range) {
- .start = cxld->platform_res.start,
- .end = cxld->platform_res.end,
- };
-
- return range_contains(&root_range, dev_range);
+ return range_contains(&cxld->hpa_range, dev_range);
}
static void disable_hdm(void *_cxlhdm)
@@ -458,3 +453,175 @@ hdm_init:
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
+
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE 0x0000ff00
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA 0
+#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE 0xffff0000
+#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff
+#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
+
+static struct pci_doe_mb *find_cdat_doe(struct device *uport)
+{
+ struct cxl_memdev *cxlmd;
+ struct cxl_dev_state *cxlds;
+ unsigned long index;
+ void *entry;
+
+ cxlmd = to_cxl_memdev(uport);
+ cxlds = cxlmd->cxlds;
+
+ xa_for_each(&cxlds->doe_mbs, index, entry) {
+ struct pci_doe_mb *cur = entry;
+
+ if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS))
+ return cur;
+ }
+
+ return NULL;
+}
+
+#define CDAT_DOE_REQ(entry_handle) \
+ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
+ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
+ CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
+
+static void cxl_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+struct cdat_doe_task {
+ u32 request_pl;
+ u32 response_pl[32];
+ struct completion c;
+ struct pci_doe_task task;
+};
+
+#define DECLARE_CDAT_DOE_TASK(req, cdt) \
+struct cdat_doe_task cdt = { \
+ .c = COMPLETION_INITIALIZER_ONSTACK(cdt.c), \
+ .request_pl = req, \
+ .task = { \
+ .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \
+ .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \
+ .request_pl = &cdt.request_pl, \
+ .request_pl_sz = sizeof(cdt.request_pl), \
+ .response_pl = cdt.response_pl, \
+ .response_pl_sz = sizeof(cdt.response_pl), \
+ .complete = cxl_doe_task_complete, \
+ .private = &cdt.c, \
+ } \
+}
+
+static int cxl_cdat_get_length(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ size_t *length)
+{
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ if (t.task.rv < sizeof(u32))
+ return -EIO;
+
+ *length = t.response_pl[1];
+ dev_dbg(dev, "CDAT length %zu\n", *length);
+
+ return 0;
+}
+
+static int cxl_cdat_read_table(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ struct cxl_cdat *cdat)
+{
+ size_t length = cdat->length;
+ u32 *data = cdat->table;
+ int entry_handle = 0;
+
+ do {
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+ size_t entry_dw;
+ u32 *entry;
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ /* 1 DW header + 1 DW data min */
+ if (t.task.rv < (2 * sizeof(u32)))
+ return -EIO;
+
+ /* Get the CXL table access header entry handle */
+ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
+ t.response_pl[0]);
+ entry = t.response_pl + 1;
+ entry_dw = t.task.rv / sizeof(u32);
+ /* Skip Header */
+ entry_dw -= 1;
+ entry_dw = min(length / sizeof(u32), entry_dw);
+ /* Prevent length < 1 DW from causing a buffer overflow */
+ if (entry_dw) {
+ memcpy(data, entry, entry_dw * sizeof(u32));
+ length -= entry_dw * sizeof(u32);
+ data += entry_dw;
+ }
+ } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+
+ return 0;
+}
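
cxl_cdat_read_table() streams the table one DOE exchange at a time: each response carries a one-DW access header whose entry-handle field names the next chunk, and the loop ends when the device returns the last-entry sentinel. A sketch of that loop against a faked three-entry mailbox (the response layout follows the code above; the payload values are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define LAST_ENTRY 0xffff

    /* stand-in for one DOE exchange: header DW + one payload DW */
    static int doe_read(int handle, uint32_t *resp, int *next_handle)
    {
        resp[0] = (handle == 2 ? LAST_ENTRY : handle + 1) << 16;
        resp[1] = 0x1000 + handle; /* fake table payload */
        *next_handle = resp[0] >> 16;
        return 2; /* DWs returned */
    }

    int main(void)
    {
        uint32_t table[16], resp[8];
        uint32_t *data = table;
        size_t length = sizeof(table);
        int handle = 0;

        /* read entries until the device reports the last-entry handle,
         * clamping each copy to the remaining buffer space */
        do {
            int dw = doe_read(handle, resp, &handle);
            size_t entry_dw = dw - 1; /* drop the access header */

            entry_dw = entry_dw < length / 4 ? entry_dw : length / 4;
            memcpy(data, resp + 1, entry_dw * 4);
            data += entry_dw;
            length -= entry_dw * 4;
        } while (handle != LAST_ENTRY);

        printf("read %zu bytes\n",
               (size_t)((char *)data - (char *)table)); /* 12 */
        return 0;
    }
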
+
+/**
+ * read_cdat_data - Read the CDAT data on this port
+ * @port: Port to read data from
+ *
+ * This call will sleep waiting for responses from the DOE mailbox.
+ */
+void read_cdat_data(struct cxl_port *port)
+{
+ struct pci_doe_mb *cdat_doe;
+ struct device *dev = &port->dev;
+ struct device *uport = port->uport;
+ size_t cdat_length;
+ int rc;
+
+ cdat_doe = find_cdat_doe(uport);
+ if (!cdat_doe) {
+ dev_dbg(dev, "No CDAT mailbox\n");
+ return;
+ }
+
+ port->cdat_available = true;
+
+ if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
+ dev_dbg(dev, "No CDAT length\n");
+ return;
+ }
+
+ port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL);
+ if (!port->cdat.table)
+ return;
+
+ port->cdat.length = cdat_length;
+ rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat);
+ if (rc) {
+ /* Don't leave table data allocated on error */
+ devm_kfree(dev, port->cdat.table);
+ port->cdat.table = NULL;
+ port->cdat.length = 0;
+ dev_err(dev, "CDAT data read error\n");
+ }
+}
+EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index bec7cfb54ebf..1d12a8206444 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
{
- struct cxl_port *port = find_cxl_root(&cxl_nvd->dev);
+ struct cxl_port *port = find_cxl_root(start);
struct device *dev;
if (!port)
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index dbce99bdffab..bffde862de0b 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/memregion.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -42,6 +44,8 @@ static int cxl_device_id(struct device *dev)
return CXL_DEVICE_NVDIMM_BRIDGE;
if (dev->type == &cxl_nvdimm_type)
return CXL_DEVICE_NVDIMM;
+ if (dev->type == CXL_PMEM_REGION_TYPE())
+ return CXL_DEVICE_PMEM_REGION;
if (is_cxl_port(dev)) {
if (is_cxl_root(to_cxl_port(dev)))
return CXL_DEVICE_ROOT;
@@ -49,6 +53,8 @@ static int cxl_device_id(struct device *dev)
}
if (is_cxl_memdev(dev))
return CXL_DEVICE_MEMORY_EXPANDER;
+ if (dev->type == CXL_REGION_TYPE())
+ return CXL_DEVICE_REGION;
return 0;
}
@@ -73,14 +79,8 @@ static ssize_t start_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 start;
- if (is_root_decoder(dev))
- start = cxld->platform_res.start;
- else
- start = cxld->decoder_range.start;
-
- return sysfs_emit(buf, "%#llx\n", start);
+ return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);
@@ -88,14 +88,8 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 size;
-
- if (is_root_decoder(dev))
- size = resource_size(&cxld->platform_res);
- else
- size = range_len(&cxld->decoder_range);
- return sysfs_emit(buf, "%#llx\n", size);
+ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);
@@ -131,20 +125,21 @@ static ssize_t target_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_type);
-static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
+static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
ssize_t offset = 0;
int i, rc = 0;
for (i = 0; i < cxld->interleave_ways; i++) {
- struct cxl_dport *dport = cxld->target[i];
+ struct cxl_dport *dport = cxlsd->target[i];
struct cxl_dport *next = NULL;
if (!dport)
break;
if (i + 1 < cxld->interleave_ways)
- next = cxld->target[i + 1];
+ next = cxlsd->target[i + 1];
rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
next ? "," : "");
if (rc < 0)
@@ -158,15 +153,15 @@ static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
static ssize_t target_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
ssize_t offset;
unsigned int seq;
int rc;
do {
- seq = read_seqbegin(&cxld->target_lock);
- rc = emit_target_list(cxld, buf);
- } while (read_seqretry(&cxld->target_lock, seq));
+ seq = read_seqbegin(&cxlsd->target_lock);
+ rc = emit_target_list(cxlsd, buf);
+ } while (read_seqretry(&cxlsd->target_lock, seq));
if (rc < 0)
return rc;
@@ -180,10 +175,121 @@ static ssize_t target_list_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_list);
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ switch (cxled->mode) {
+ case CXL_DECODER_RAM:
+ return sysfs_emit(buf, "ram\n");
+ case CXL_DECODER_PMEM:
+ return sysfs_emit(buf, "pmem\n");
+ case CXL_DECODER_NONE:
+ return sysfs_emit(buf, "none\n");
+ case CXL_DECODER_MIXED:
+ default:
+ return sysfs_emit(buf, "mixed\n");
+ }
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ enum cxl_decoder_mode mode;
+ ssize_t rc;
+
+ if (sysfs_streq(buf, "pmem"))
+ mode = CXL_DECODER_PMEM;
+ else if (sysfs_streq(buf, "ram"))
+ mode = CXL_DECODER_RAM;
+ else
+ return -EINVAL;
+
+ rc = cxl_dpa_set_mode(cxled, mode);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ u64 base = cxl_dpa_resource_start(cxled);
+
+ return sysfs_emit(buf, "%#llx\n", base);
+}
+static DEVICE_ATTR_RO(dpa_resource);
+
+static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ resource_size_t size = cxl_dpa_size(cxled);
+
+ return sysfs_emit(buf, "%pa\n", &size);
+}
+
+static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ unsigned long long size;
+ ssize_t rc;
+
+ rc = kstrtoull(buf, 0, &size);
+ if (rc)
+ return rc;
+
+ if (!IS_ALIGNED(size, SZ_256M))
+ return -EINVAL;
+
+ rc = cxl_dpa_free(cxled);
+ if (rc)
+ return rc;
+
+ if (size == 0)
+ return len;
+
+ rc = cxl_dpa_alloc(cxled, size);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(dpa_size);
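
Together with mode_store(), the dpa_size attribute gives userspace a two-step provisioning flow: pick a partition, then size the allocation (256MiB-aligned; writing 0 frees it). A small user-space sketch of that sequence; the decoder path is hypothetical and depends on how the topology enumerated:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* hypothetical endpoint decoder; the real name varies */
    #define DEC "/sys/bus/cxl/devices/decoder2.0"

    static int sysfs_write(const char *attr, const char *val)
    {
        char path[128];
        int fd, rc;

        snprintf(path, sizeof(path), DEC "/%s", attr);
        fd = open(path, O_WRONLY);
        if (fd < 0)
            return -1;
        rc = write(fd, val, strlen(val)) < 0 ? -1 : 0;
        close(fd);
        return rc;
    }

    int main(void)
    {
        /* select a partition, then size the DPA allocation */
        if (sysfs_write("mode", "pmem"))
            perror("mode");
        if (sysfs_write("dpa_size", "0x10000000")) /* 256MiB */
            perror("dpa_size");
        return 0;
    }
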
+
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
+}
+
+static DEVICE_ATTR_RO(interleave_granularity);
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
+}
+
+static DEVICE_ATTR_RO(interleave_ways);
+
static struct attribute *cxl_decoder_base_attrs[] = {
&dev_attr_start.attr,
&dev_attr_size.attr,
&dev_attr_locked.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_interleave_ways.attr,
NULL,
};
@@ -197,11 +303,35 @@ static struct attribute *cxl_decoder_root_attrs[] = {
&dev_attr_cap_type2.attr,
&dev_attr_cap_type3.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(create_pmem_region)
+ SET_CXL_REGION_ATTR(delete_region)
NULL,
};
+static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
+{
+ unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;
+
+ return (cxlrd->cxlsd.cxld.flags & flags) == flags;
+}
+
+static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_decoder_root_attribute_group = {
.attrs = cxl_decoder_root_attrs,
+ .is_visible = cxl_root_decoder_visible,
};
static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
@@ -214,6 +344,7 @@ static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
static struct attribute *cxl_decoder_switch_attrs[] = {
&dev_attr_target_type.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -230,6 +361,10 @@ static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
static struct attribute *cxl_decoder_endpoint_attrs[] = {
&dev_attr_target_type.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_dpa_size.attr,
+ &dev_attr_dpa_resource.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -244,31 +379,64 @@ static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
NULL,
};
-static void cxl_decoder_release(struct device *dev)
+static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
- struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
ida_free(&port->decoder_ida, cxld->id);
- kfree(cxld);
put_device(&port->dev);
}
+static void cxl_endpoint_decoder_release(struct device *dev)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ __cxl_decoder_release(&cxled->cxld);
+ kfree(cxled);
+}
+
+static void cxl_switch_decoder_release(struct device *dev)
+{
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ __cxl_decoder_release(&cxlsd->cxld);
+ kfree(cxlsd);
+}
+
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
+ "not a cxl_root_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
+
+static void cxl_root_decoder_release(struct device *dev)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (atomic_read(&cxlrd->region_id) >= 0)
+ memregion_free(atomic_read(&cxlrd->region_id));
+ __cxl_decoder_release(&cxlrd->cxlsd.cxld);
+ kfree(cxlrd);
+}
+
static const struct device_type cxl_decoder_endpoint_type = {
.name = "cxl_decoder_endpoint",
- .release = cxl_decoder_release,
+ .release = cxl_endpoint_decoder_release,
.groups = cxl_decoder_endpoint_attribute_groups,
};
static const struct device_type cxl_decoder_switch_type = {
.name = "cxl_decoder_switch",
- .release = cxl_decoder_release,
+ .release = cxl_switch_decoder_release,
.groups = cxl_decoder_switch_attribute_groups,
};
static const struct device_type cxl_decoder_root_type = {
.name = "cxl_decoder_root",
- .release = cxl_decoder_release,
+ .release = cxl_root_decoder_release,
.groups = cxl_decoder_root_attribute_groups,
};
@@ -283,39 +451,63 @@ bool is_root_decoder(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
-bool is_cxl_decoder(struct device *dev)
+bool is_switch_decoder(struct device *dev)
{
- return dev->type && dev->type->release == cxl_decoder_release;
+ return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
- if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
+ if (dev_WARN_ONCE(dev,
+ !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
"not a cxl_decoder device\n"))
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
+ "not a cxl_endpoint_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
+
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
+ "not a cxl_switch_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_switch_decoder, cxld.dev);
+}
+
static void cxl_ep_release(struct cxl_ep *ep)
{
- if (!ep)
- return;
- list_del(&ep->list);
put_device(ep->ep);
kfree(ep);
}
+static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
+{
+ if (!ep)
+ return;
+ xa_erase(&port->endpoints, (unsigned long) ep->ep);
+ cxl_ep_release(ep);
+}
+
static void cxl_port_release(struct device *dev)
{
struct cxl_port *port = to_cxl_port(dev);
- struct cxl_ep *ep, *_e;
+ unsigned long index;
+ struct cxl_ep *ep;
- device_lock(dev);
- list_for_each_entry_safe(ep, _e, &port->endpoints, list)
- cxl_ep_release(ep);
- device_unlock(dev);
+ xa_for_each(&port->endpoints, index, ep)
+ cxl_ep_remove(port, ep);
+ xa_destroy(&port->endpoints);
+ xa_destroy(&port->dports);
+ xa_destroy(&port->regions);
ida_free(&cxl_port_ida, port->id);
kfree(port);
}
@@ -370,7 +562,7 @@ static void unregister_port(void *_port)
lock_dev = &parent->dev;
device_lock_assert(lock_dev);
- port->uport = NULL;
+ port->dead = true;
device_unregister(&port->dev);
}
@@ -395,7 +587,7 @@ static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
@@ -409,6 +601,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
if (rc < 0)
goto err;
port->id = rc;
+ port->uport = uport;
/*
* The top-level cxl_port "cxl_root" does not have a cxl_port as
@@ -417,17 +610,37 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
* description.
*/
dev = &port->dev;
- if (parent_port) {
+ if (parent_dport) {
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_port *iter;
+
dev->parent = &parent_port->dev;
port->depth = parent_port->depth + 1;
+ port->parent_dport = parent_dport;
+
+ /*
+ * walk to the host bridge, or the first ancestor that knows
+ * the host bridge
+ */
+ iter = port;
+ while (!iter->host_bridge &&
+ !is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+ if (iter->host_bridge)
+ port->host_bridge = iter->host_bridge;
+ else
+ port->host_bridge = iter->uport;
+ dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
} else
dev->parent = uport;
- port->uport = uport;
port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida);
- INIT_LIST_HEAD(&port->dports);
- INIT_LIST_HEAD(&port->endpoints);
+ port->hdm_end = -1;
+ port->commit_end = -1;
+ xa_init(&port->dports);
+ xa_init(&port->endpoints);
+ xa_init(&port->regions);
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -447,24 +660,24 @@ err:
* @host: host device for devm operations
* @uport: "physical" device implementing this upstream port
* @component_reg_phys: (optional) for configurable cxl_port instances
- * @parent_port: next hop up in the CXL memory decode hierarchy
+ * @parent_dport: next hop up in the CXL memory decode hierarchy
*/
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
int rc;
- port = cxl_port_alloc(uport, component_reg_phys, parent_port);
+ port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
if (IS_ERR(port))
return port;
dev = &port->dev;
if (is_cxl_memdev(uport))
rc = dev_set_name(dev, "endpoint%d", port->id);
- else if (parent_port)
+ else if (parent_dport)
rc = dev_set_name(dev, "port%d", port->id);
else
rc = dev_set_name(dev, "root%d", port->id);
@@ -556,17 +769,13 @@ static int match_root_child(struct device *dev, const void *match)
return 0;
port = to_cxl_port(dev);
- device_lock(dev);
- list_for_each_entry(dport, &port->dports, list) {
- iter = match;
- while (iter) {
- if (iter == dport->dport)
- goto out;
- iter = iter->parent;
- }
+ iter = match;
+ while (iter) {
+ dport = cxl_find_dport_by_dev(port, iter);
+ if (dport)
+ break;
+ iter = iter->parent;
}
-out:
- device_unlock(dev);
return !!iter;
}
@@ -590,9 +799,10 @@ EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
+ unsigned long index;
device_lock_assert(&port->dev);
- list_for_each_entry (dport, &port->dports, list)
+ xa_for_each(&port->dports, index, dport)
if (dport->port_id == id)
return dport;
return NULL;
@@ -604,15 +814,15 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new)
device_lock_assert(&port->dev);
dup = find_dport(port, new->port_id);
- if (dup)
+ if (dup) {
dev_err(&port->dev,
"unable to add dport%d-%s non-unique port id (%s)\n",
new->port_id, dev_name(new->dport),
dev_name(dup->dport));
- else
- list_add_tail(&new->list, &port->dports);
-
- return dup ? -EEXIST : 0;
+ return -EBUSY;
+ }
+ return xa_insert(&port->dports, (unsigned long)new->dport, new,
+ GFP_KERNEL);
}
/*
@@ -639,10 +849,8 @@ static void cxl_dport_remove(void *data)
struct cxl_dport *dport = data;
struct cxl_port *port = dport->port;
+ xa_erase(&port->dports, (unsigned long) dport->dport);
put_device(dport->dport);
- cond_cxl_root_lock(port);
- list_del(&dport->list);
- cond_cxl_root_unlock(port);
}
static void cxl_dport_unlink(void *data)
@@ -694,7 +902,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
if (!dport)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&dport->list);
dport->dport = dport_dev;
dport->port_id = port_id;
dport->component_reg_phys = component_reg_phys;
@@ -723,44 +930,33 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
-static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
-{
- struct cxl_ep *ep;
-
- device_lock_assert(&port->dev);
- list_for_each_entry(ep, &port->endpoints, list)
- if (ep->ep == ep_dev)
- return ep;
- return NULL;
-}
-
-static int add_ep(struct cxl_port *port, struct cxl_ep *new)
+static int add_ep(struct cxl_ep *new)
{
- struct cxl_ep *dup;
+ struct cxl_port *port = new->dport->port;
+ int rc;
device_lock(&port->dev);
if (port->dead) {
device_unlock(&port->dev);
return -ENXIO;
}
- dup = find_ep(port, new->ep);
- if (!dup)
- list_add_tail(&new->list, &port->endpoints);
+ rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
+ GFP_KERNEL);
device_unlock(&port->dev);
- return dup ? -EEXIST : 0;
+ return rc;
}
/**
* cxl_add_ep - register an endpoint's interest in a port
- * @port: a port in the endpoint's topology ancestry
+ * @dport: the dport that routes to @ep_dev
* @ep_dev: device representing the endpoint
*
* Intermediate CXL ports are scanned based on the arrival of endpoints.
* When those endpoints depart the port can be destroyed once all
* endpoints that care about that port have been removed.
*/
-static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
+static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
struct cxl_ep *ep;
int rc;
@@ -769,10 +965,10 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
if (!ep)
return -ENOMEM;
- INIT_LIST_HEAD(&ep->list);
ep->ep = get_device(ep_dev);
+ ep->dport = dport;
- rc = add_ep(port, ep);
+ rc = add_ep(ep);
if (rc)
cxl_ep_release(ep);
return rc;
@@ -781,11 +977,13 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
struct cxl_find_port_ctx {
const struct device *dport_dev;
const struct cxl_port *parent_port;
+ struct cxl_dport **dport;
};
static int match_port_by_dport(struct device *dev, const void *data)
{
const struct cxl_find_port_ctx *ctx = data;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!is_cxl_port(dev))
@@ -794,7 +992,10 @@ static int match_port_by_dport(struct device *dev, const void *data)
return 0;
port = to_cxl_port(dev);
- return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL;
+ dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
+ if (ctx->dport)
+ *ctx->dport = dport;
+ return dport != NULL;
}
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
@@ -810,24 +1011,32 @@ static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
return NULL;
}
-static struct cxl_port *find_cxl_port(struct device *dport_dev)
+static struct cxl_port *find_cxl_port(struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev)
+ struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
.parent_port = parent_port,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
/*
@@ -851,13 +1060,13 @@ static void delete_endpoint(void *data)
struct cxl_port *parent_port;
struct device *parent;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, NULL);
if (!parent_port)
goto out;
parent = &parent_port->dev;
device_lock(parent);
- if (parent->driver && endpoint->uport) {
+ if (parent->driver && !endpoint->dead) {
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
@@ -883,21 +1092,70 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
* for a port to be unregistered is when all memdevs beneath that port have gone
* through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
- * devm action registration order.
+ * devm action registration order, and for dports to have already been
+ * destroyed by reap_dports().
*/
-static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
+static void delete_switch_port(struct cxl_port *port)
+{
+ devm_release_action(port->dev.parent, cxl_unlink_uport, port);
+ devm_release_action(port->dev.parent, unregister_port, port);
+}
+
+static void reap_dports(struct cxl_port *port)
{
- struct cxl_dport *dport, *_d;
+ struct cxl_dport *dport;
+ unsigned long index;
+
+ device_lock_assert(&port->dev);
- list_for_each_entry_safe(dport, _d, dports, list) {
+ xa_for_each(&port->dports, index, dport) {
devm_release_action(&port->dev, cxl_dport_unlink, dport);
devm_release_action(&port->dev, cxl_dport_remove, dport);
devm_kfree(&port->dev, dport);
}
- devm_release_action(port->dev.parent, cxl_unlink_uport, port);
- devm_release_action(port->dev.parent, unregister_port, port);
}
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport)
+{
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_port *endpoint, *iter, *down;
+ int rc;
+
+ /*
+ * Now that the path to the root is established, record all the
+ * intervening ports in the chain.
+ */
+ for (iter = parent_port, down = NULL; !is_cxl_root(iter);
+ down = iter, iter = to_cxl_port(iter->dev.parent)) {
+ struct cxl_ep *ep;
+
+ ep = cxl_ep_load(iter, cxlmd);
+ ep->next = down;
+ }
+
+ endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
+ cxlds->component_reg_phys, parent_dport);
+ if (IS_ERR(endpoint))
+ return PTR_ERR(endpoint);
+
+ dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+
+ rc = cxl_endpoint_autoremove(cxlmd, endpoint);
+ if (rc)
+ return rc;
+
+ if (!endpoint->dev.driver) {
+ dev_err(&cxlmd->dev, "%s failed probe\n",
+ dev_name(&endpoint->dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);
+
static void cxl_detach_ep(void *data)
{
struct cxl_memdev *cxlmd = data;
@@ -906,13 +1164,13 @@ static void cxl_detach_ep(void *data)
for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct cxl_port *port, *parent_port;
- LIST_HEAD(reap_dports);
struct cxl_ep *ep;
+ bool died = false;
if (!dport_dev)
break;
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, NULL);
if (!port)
continue;
@@ -936,26 +1194,27 @@ static void cxl_detach_ep(void *data)
}
device_lock(&port->dev);
- ep = find_ep(port, &cxlmd->dev);
+ ep = cxl_ep_load(port, cxlmd);
dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
- cxl_ep_release(ep);
- if (ep && !port->dead && list_empty(&port->endpoints) &&
+ cxl_ep_remove(port, ep);
+ if (ep && !port->dead && xa_empty(&port->endpoints) &&
!is_cxl_root(parent_port)) {
/*
* This was the last ep attached to a dynamically
* enumerated port. Block new cxl_add_ep() and garbage
* collect the port.
*/
+ died = true;
port->dead = true;
- list_splice_init(&port->dports, &reap_dports);
+ reap_dports(port);
}
device_unlock(&port->dev);
- if (!list_empty(&reap_dports)) {
+ if (died) {
dev_dbg(&cxlmd->dev, "delete %s\n",
dev_name(&port->dev));
- delete_switch_port(port, &reap_dports);
+ delete_switch_port(port);
}
put_device(&port->dev);
device_unlock(&parent_port->dev);
@@ -986,6 +1245,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
{
struct device *dparent = grandparent(dport_dev);
struct cxl_port *port, *parent_port = NULL;
+ struct cxl_dport *dport, *parent_dport;
resource_size_t component_reg_phys;
int rc;
@@ -1000,7 +1260,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return -ENXIO;
}
- parent_port = find_cxl_port(dparent);
+ parent_port = find_cxl_port(dparent, &parent_dport);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
@@ -1015,13 +1275,14 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
goto out;
}
- port = find_cxl_port_at(parent_port, dport_dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
if (!port) {
component_reg_phys = find_component_registers(uport_dev);
port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_port);
+ component_reg_phys, parent_dport);
+ /* retry find to pick up the new dport information */
if (!IS_ERR(port))
- get_device(&port->dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
}
out:
device_unlock(&parent_port->dev);
@@ -1031,8 +1292,8 @@ out:
else {
dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
- if (rc == -EEXIST) {
+ rc = cxl_add_ep(dport, &cxlmd->dev);
+ if (rc == -EBUSY) {
/*
* "can't" happen, but this error code means
* something to the caller, so translate it.
@@ -1065,6 +1326,7 @@ retry:
for (iter = dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct device *uport_dev;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!dport_dev)
@@ -1080,12 +1342,12 @@ retry:
dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, &dport);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
+ rc = cxl_add_ep(dport, &cxlmd->dev);
/*
* If the endpoint already exists in the port's list,
@@ -1094,7 +1356,7 @@ retry:
* the parent_port lock as the current port may be being
* reaped.
*/
- if (rc && rc != -EEXIST) {
+ if (rc && rc != -EBUSY) {
put_device(&port->dev);
return rc;
}
@@ -1124,30 +1386,14 @@ retry:
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd)
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport)
{
- return find_cxl_port(grandparent(&cxlmd->dev));
+ return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev)
-{
- struct cxl_dport *dport;
-
- device_lock(&port->dev);
- list_for_each_entry(dport, &port->dports, list)
- if (dport->dport == dev) {
- device_unlock(&port->dev);
- return dport;
- }
-
- device_unlock(&port->dev);
- return NULL;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
-
-static int decoder_populate_targets(struct cxl_decoder *cxld,
+static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map)
{
int i, rc = 0;
@@ -1157,88 +1403,92 @@ static int decoder_populate_targets(struct cxl_decoder *cxld,
device_lock_assert(&port->dev);
- if (list_empty(&port->dports))
+ if (xa_empty(&port->dports))
return -EINVAL;
- write_seqlock(&cxld->target_lock);
- for (i = 0; i < cxld->nr_targets; i++) {
+ write_seqlock(&cxlsd->target_lock);
+ for (i = 0; i < cxlsd->nr_targets; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);
if (!dport) {
rc = -ENXIO;
break;
}
- cxld->target[i] = dport;
+ cxlsd->target[i] = dport;
}
- write_sequnlock(&cxld->target_lock);
+ write_sequnlock(&cxlsd->target_lock);
return rc;
}
+static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
+{
+ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
+ struct cxl_decoder *cxld = &cxlsd->cxld;
+ int iw;
+
+ iw = cxld->interleave_ways;
+ if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
+ "misconfigured root decoder\n"))
+ return NULL;
+
+ return cxlrd->cxlsd.target[pos % iw];
+}
+
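+/*
+ * Worked example for cxl_hb_modulo() above (illustrative, not part of
+ * this patch): a root decoder interleaved x2 across host bridges A and
+ * B (target[0] == A, target[1] == B) routes position 0 to A, position
+ * 1 to B, position 2 back to A, and so on: target[pos % interleave_ways].
+ */
+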
static struct lock_class_key cxl_decoder_key;
/**
- * cxl_decoder_alloc - Allocate a new CXL decoder
+ * cxl_decoder_init - Common decoder setup / initialization
* @port: owning port of this decoder
- * @nr_targets: downstream targets accessible by this decoder. All upstream
- * ports and root ports must have at least 1 target. Endpoint
- * devices will have 0 targets. Callers wishing to register an
- * endpoint device should specify 0.
- *
- * A port should contain one or more decoders. Each of those decoders enable
- * some address space for CXL.mem utilization. A decoder is expected to be
- * configured by the caller before registering.
+ * @cxld: common decoder properties to initialize
*
- * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder
- * is initialized to be a "passthrough" decoder.
+ * A port may contain one or more decoders. Each of those decoders
+ * enables some address space for CXL.mem utilization. A decoder is
+ * expected to be configured by the caller before registering via
+ * cxl_decoder_add().
*/
-static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
struct device *dev;
- int rc = 0;
-
- if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
- return ERR_PTR(-EINVAL);
-
- cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
- if (!cxld)
- return ERR_PTR(-ENOMEM);
+ int rc;
rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
if (rc < 0)
- goto err;
+ return rc;
/* need parent to stick around to release the id */
get_device(&port->dev);
cxld->id = rc;
- cxld->nr_targets = nr_targets;
- seqlock_init(&cxld->target_lock);
dev = &cxld->dev;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_decoder_key);
device_set_pm_not_required(dev);
dev->parent = &port->dev;
dev->bus = &cxl_bus_type;
- if (is_cxl_root(port))
- cxld->dev.type = &cxl_decoder_root_type;
- else if (is_cxl_endpoint(port))
- cxld->dev.type = &cxl_decoder_endpoint_type;
- else
- cxld->dev.type = &cxl_decoder_switch_type;
	/* Pre-initialize an "empty" decoder */
cxld->interleave_ways = 1;
cxld->interleave_granularity = PAGE_SIZE;
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0);
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
- return cxld;
-err:
- kfree(cxld);
- return ERR_PTR(rc);
+ return 0;
+}
+
+static int cxl_switch_decoder_init(struct cxl_port *port,
+ struct cxl_switch_decoder *cxlsd,
+ int nr_targets)
+{
+ if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
+ return -EINVAL;
+
+ cxlsd->nr_targets = nr_targets;
+ seqlock_init(&cxlsd->target_lock);
+ return cxl_decoder_init(port, &cxlsd->cxld);
}
/**
@@ -1251,13 +1501,46 @@ err:
* firmware description of CXL resources into a CXL standard decode
* topology.
*/
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_root_decoder *cxlrd;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_root(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
+ GFP_KERNEL);
+ if (!cxlrd)
+ return ERR_PTR(-ENOMEM);
+
+ cxlsd = &cxlrd->cxlsd;
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlrd);
+ return ERR_PTR(rc);
+ }
+
+ cxlrd->calc_hb = cxl_hb_modulo;
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_root_type;
+ /*
+ * cxl_root_decoder_release() special cases negative ids to
+ * detect memregion_alloc() failures.
+ */
+ atomic_set(&cxlrd->region_id, -1);
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0) {
+ put_device(&cxld->dev);
+ return ERR_PTR(rc);
+ }
+
+ atomic_set(&cxlrd->region_id, rc);
+ return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -1272,13 +1555,29 @@ EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
* that sit between Switch Upstream Ports / Switch Downstream Ports and
* Host Bridges / Root Ports.
*/
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (is_cxl_root(port) || is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
+ if (!cxlsd)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlsd);
+ return ERR_PTR(rc);
+ }
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_switch_type;
+ return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
@@ -1288,18 +1587,35 @@ EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
*
* Return: A new cxl decoder to be registered by cxl_decoder_add()
*/
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, 0);
+ cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
+ if (!cxled)
+ return ERR_PTR(-ENOMEM);
+
+ cxled->pos = -1;
+ cxld = &cxled->cxld;
+ rc = cxl_decoder_init(port, cxld);
+ if (rc) {
+ kfree(cxled);
+ return ERR_PTR(rc);
+ }
+
+ cxld->dev.type = &cxl_decoder_endpoint_type;
+ return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
/**
* cxl_decoder_add_locked - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1335,7 +1651,9 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
if (!is_endpoint_decoder(dev)) {
- rc = decoder_populate_targets(cxld, port, target_map);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ rc = decoder_populate_targets(cxlsd, port, target_map);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1347,20 +1665,13 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (rc)
return rc;
- /*
- * Platform decoder resources should show up with a reasonable name. All
- * other resources are just sub ranges within the main decoder resource.
- */
- if (is_root_decoder(dev))
- cxld->platform_res.name = dev_name(dev);
-
return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
/**
* cxl_decoder_add - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1394,6 +1705,13 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
+ struct cxl_endpoint_decoder *cxled;
+
+ if (is_endpoint_decoder(dev)) {
+ cxled = to_cxl_endpoint_decoder(dev);
+ cxl_decoder_kill_region(cxled);
+ }
+
device_unregister(dev);
}
@@ -1521,10 +1839,20 @@ struct bus_type cxl_bus_type = {
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
+static struct dentry *cxl_debugfs;
+
+struct dentry *cxl_debugfs_create_dir(const char *dir)
+{
+ return debugfs_create_dir(dir, cxl_debugfs);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
+
static __init int cxl_core_init(void)
{
int rc;
+ cxl_debugfs = debugfs_create_dir("cxl", NULL);
+
cxl_mbox_init();
rc = cxl_memdev_init();
@@ -1541,22 +1869,28 @@ static __init int cxl_core_init(void)
if (rc)
goto err_bus;
+ rc = cxl_region_init();
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ bus_unregister(&cxl_bus_type);
err_bus:
destroy_workqueue(cxl_bus_wq);
err_wq:
cxl_memdev_exit();
- cxl_mbox_exit();
return rc;
}
static void cxl_core_exit(void)
{
+ cxl_region_exit();
bus_unregister(&cxl_bus_type);
destroy_workqueue(cxl_bus_wq);
cxl_memdev_exit();
- cxl_mbox_exit();
+ debugfs_remove_recursive(cxl_debugfs);
}
module_init(cxl_core_init);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
new file mode 100644
index 000000000000..401148016978
--- /dev/null
+++ b/drivers/cxl/core/region.c
@@ -0,0 +1,1896 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/memregion.h>
+#include <linux/genalloc.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/idr.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl core region
+ *
+ * CXL Regions represent mapped memory capacity in system physical address
+ * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
+ * Memory ranges, Regions represent the capacity actively mapped by the HDM
+ * Decoder Capability structures throughout the Host Bridges, Switches, and
+ * Endpoints in the topology.
+ *
+ * Region configuration has ordering constraints. UUID may be set at any time
+ * but is only visible for persistent regions.
+ * 1. Interleave granularity
+ * 2. Interleave size
+ * 3. Decoder targets
+ */
+
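+/*
+ * Illustrative configuration flow over the sysfs attributes defined in
+ * this file (a sketch; names like decoder0.0, decoder2.0 and region0
+ * are hypothetical examples, and exact paths are platform dependent):
+ *
+ *   echo region0 > decoder0.0/create_pmem_region
+ *   echo $uuid > region0/uuid
+ *   echo 256 > region0/interleave_granularity
+ *   echo 2 > region0/interleave_ways
+ *   echo $((2 << 30)) > region0/size
+ *   echo decoder2.0 > region0/target0
+ *   echo decoder3.0 > region0/target1
+ *   echo 1 > region0/commit
+ */
+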
+/*
+ * All changes to the interleave configuration occur with this lock held
+ * for write.
+ */
+static DECLARE_RWSEM(cxl_region_rwsem);
+
+static struct cxl_region *to_cxl_region(struct device *dev);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int is_dup(struct device *match, void *data)
+{
+ struct cxl_region_params *p;
+ struct cxl_region *cxlr;
+ uuid_t *uuid = data;
+
+ if (!is_cxl_region(match))
+ return 0;
+
+ lockdep_assert_held(&cxl_region_rwsem);
+ cxlr = to_cxl_region(match);
+ p = &cxlr->params;
+
+ if (uuid_equal(&p->uuid, uuid)) {
+ dev_dbg(match, "already has uuid: %pUb\n", uuid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ uuid_t temp;
+ ssize_t rc;
+
+ if (len != UUID_STRING_LEN + 1)
+ return -EINVAL;
+
+ rc = uuid_parse(buf, &temp);
+ if (rc)
+ return rc;
+
+ if (uuid_is_null(&temp))
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (uuid_equal(&p->uuid, &temp))
+ goto out;
+
+ rc = -EBUSY;
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ goto out;
+
+ rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
+ if (rc < 0)
+ goto out;
+
+ uuid_copy(&p->uuid, &temp);
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(uuid);
+
+static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ return xa_load(&port->regions, (unsigned long)cxlr);
+}
+
+static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i;
+
+ for (i = count - 1; i >= 0; i--) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_ep *ep;
+ int rc;
+
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->reset(cxld);
+ if (rc)
+ return rc;
+ }
+
+ rc = cxled->cxld.reset(&cxled->cxld);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cxl_region_decode_commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i, rc = 0;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+
+ /* commit bottom up */
+ for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->commit(cxld);
+ if (rc)
+ break;
+ }
+
+ if (rc) {
+ /* programming @iter failed, teardown */
+ for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ cxld->reset(cxld);
+ }
+
+ cxled->cxld.reset(&cxled->cxld);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ /* undo the targets that were successfully committed */
+ cxl_region_decode_reset(cxlr, i);
+ return rc;
+}
+
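+/*
+ * Note the ordering implemented above: decoders commit bottom-up (the
+ * endpoint decoder first, then each switch port toward the root), while
+ * reset walks top-down and resets the endpoint decoder last, so a
+ * decoder is never left committed without its upstream path.
+ */
+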
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ /* Already in the requested state? */
+ if (commit && p->state >= CXL_CONFIG_COMMIT)
+ goto out;
+ if (!commit && p->state < CXL_CONFIG_COMMIT)
+ goto out;
+
+ /* Not ready to commit? */
+ if (commit && p->state < CXL_CONFIG_ACTIVE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (commit)
+ rc = cxl_region_decode_commit(cxlr);
+ else {
+ p->state = CXL_CONFIG_RESET_PENDING;
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+
+ /*
+		 * The lock was dropped, so we need to revalidate that the reset is
+ * still pending.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING)
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ }
+
+ if (rc)
+ goto out;
+
+ if (commit)
+ p->state = CXL_CONFIG_COMMIT;
+ else if (p->state == CXL_CONFIG_RESET_PENDING)
+ p->state = CXL_CONFIG_ACTIVE;
+
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+
+static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(commit);
+
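+/*
+ * Sketch of the region state transitions driven by the attributes in
+ * this file (derived from the checks above and below):
+ *
+ *   IDLE --size--> INTERLEAVE_ACTIVE --last targetN--> ACTIVE
+ *   ACTIVE --commit=1--> COMMIT --commit=0--> RESET_PENDING --> ACTIVE
+ */
+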
+static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ return 0;
+ return a->mode;
+}
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static const struct attribute_group *get_cxl_region_target_group(void);
+
+static ssize_t interleave_ways_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ unsigned int val, save;
+ int rc;
+ u8 iw;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = ways_to_cxl(val, &iw);
+ if (rc)
+ return rc;
+
+ /*
+	 * Even for x3, x6, and x12 interleaves, the region interleave must be a
+	 * power-of-2 multiple of the host bridge interleave.
+ */
+ if (!is_power_of_2(val / cxld->interleave_ways) ||
+ (val % cxld->interleave_ways)) {
+ dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ save = p->interleave_ways;
+ p->interleave_ways = val;
+ rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
+ if (rc)
+ p->interleave_ways = save;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_ways);
+
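+/*
+ * Example of the check above (illustrative): with a host bridge
+ * interleaved x2, region ways of 2, 4, or 8 pass (val / 2 is a power
+ * of 2), while 6 is rejected because 6 / 2 == 3.
+ */
+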
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static ssize_t interleave_granularity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc, val;
+ u16 ig;
+
+ rc = kstrtoint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = granularity_to_cxl(val, &ig);
+ if (rc)
+ return rc;
+
+ /*
+ * When the host-bridge is interleaved, disallow region granularity !=
+ * root granularity. Regions with a granularity less than the root
+ * interleave result in needing multiple endpoints to support a single
+	 * slot in the interleave (possible to support in the future). Regions
+ * with a granularity greater than the root interleave result in invalid
+ * DPA translations (invalid to support).
+ */
+ if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ p->interleave_granularity = val;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_granularity);
+
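+/*
+ * For example (illustrative): a root decoder interleaved x2 at 1K
+ * granularity only accepts a region granularity of 1K, while an x1
+ * root decoder accepts any power-of-2 granularity from 256 to 16K.
+ */
+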
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 resource = -1ULL;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ resource = p->res->start;
+ rc = sysfs_emit(buf, "%#llx\n", resource);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(resource);
+
+static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct resource *res;
+ u32 remainder = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ /* Nothing to do... */
+ if (p->res && resource_size(p->res) == size)
+ return 0;
+
+ /* To change size the old size must be freed first */
+ if (p->res)
+ return -EBUSY;
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
+
+ /* ways, granularity and uuid (if PMEM) need to be set before HPA */
+ if (!p->interleave_ways || !p->interleave_granularity ||
+ (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ return -ENXIO;
+
+ div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+ if (remainder)
+ return -EINVAL;
+
+ res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
+ dev_name(&cxlr->dev));
+ if (IS_ERR(res)) {
+ dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
+ PTR_ERR(res));
+ return PTR_ERR(res);
+ }
+
+ p->res = res;
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+
+ return 0;
+}
+
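+/*
+ * Example of the remainder check above (illustrative): an x2 region
+ * must be sized in multiples of 2 * SZ_256M == 512M, so a 768M request
+ * fails with -EINVAL while 1G succeeds (capacity permitting).
+ */
+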
+static void cxl_region_iomem_release(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (device_is_registered(&cxlr->dev))
+ lockdep_assert_held_write(&cxl_region_rwsem);
+ if (p->res) {
+ remove_resource(p->res);
+ kfree(p->res);
+ p->res = NULL;
+ }
+}
+
+static int free_hpa(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!p->res)
+ return 0;
+
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ return -EBUSY;
+
+ cxl_region_iomem_release(cxlr);
+ p->state = CXL_CONFIG_IDLE;
+ return 0;
+}
+
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ u64 val;
+ int rc;
+
+ rc = kstrtou64(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (val)
+ rc = alloc_hpa(cxlr, val);
+ else
+ rc = free_hpa(cxlr);
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 size = 0;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ size = resource_size(p->res);
+ rc = sysfs_emit(buf, "%#llx\n", size);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(size);
+
+static struct attribute *cxl_region_attrs[] = {
+ &dev_attr_uuid.attr,
+ &dev_attr_commit.attr,
+ &dev_attr_interleave_ways.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group cxl_region_group = {
+ .attrs = cxl_region_attrs,
+ .is_visible = cxl_region_visible,
+};
+
+static ssize_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled = p->targets[pos];
+ if (!cxled)
+ rc = sysfs_emit(buf, "\n");
+ else
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
+out:
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int match_free_decoder(struct device *dev, void *data)
+{
+ struct cxl_decoder *cxld;
+ int *id = data;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+
+ /* enforce ordered allocation */
+ if (cxld->id != *id)
+ return 0;
+
+ if (!cxld->region)
+ return 1;
+
+ (*id)++;
+
+ return 0;
+}
+
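+/*
+ * Example (illustrative): if decoder id 0 on a port is already bound
+ * to another region, the cursor in match_free_decoder() advances past
+ * it and id 1 becomes the first candidate, preserving the expectation
+ * that decoders are allocated, and later committed, in instance order.
+ */
+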
+static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct device *dev;
+ int id = 0;
+
+ dev = device_find_child(&port->dev, &id, match_free_decoder);
+ if (!dev)
+ return NULL;
+ /*
+	 * This decoder stays registered for as long as the endpoint decoder is
+	 * registered, and endpoint decoder unregistration holds the
+	 * cxl_region_rwsem over unregister events, so there is no need to hold
+	 * on to this extra reference.
+ */
+ put_device(dev);
+ return to_cxl_decoder(dev);
+}
+
+static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_region_ref *cxl_rr, *iter;
+ unsigned long index;
+ int rc;
+
+ xa_for_each(&port->regions, index, iter) {
+ struct cxl_region_params *ip = &iter->region->params;
+
+ if (ip->res->start > p->res->start) {
+ dev_dbg(&cxlr->dev,
+ "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
+ cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
+ if (!cxl_rr)
+ return ERR_PTR(-ENOMEM);
+ cxl_rr->port = port;
+ cxl_rr->region = cxlr;
+ cxl_rr->nr_targets = 1;
+ xa_init(&cxl_rr->endpoints);
+
+ rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track region reference: %d\n",
+ dev_name(&port->dev), rc);
+ kfree(cxl_rr);
+ return ERR_PTR(rc);
+ }
+
+ return cxl_rr;
+}
+
+static void free_region_ref(struct cxl_region_ref *cxl_rr)
+{
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+
+ dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
+ if (cxld->region == cxlr) {
+ cxld->region = NULL;
+ put_device(&cxlr->dev);
+ }
+
+ xa_erase(&port->regions, (unsigned long)cxlr);
+ xa_destroy(&cxl_rr->endpoints);
+ kfree(cxl_rr);
+}
+
+static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ int rc;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
+
+ if (ep) {
+ rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+ }
+ cxl_rr->nr_eps++;
+
+ if (!cxld->region) {
+ cxld->region = cxlr;
+ get_device(&cxlr->dev);
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_port_attach_region() - track a region's interest in a port by endpoint
+ * @port: port to add a new region reference 'struct cxl_region_ref'
+ * @cxlr: region to attach to @port
+ * @cxled: endpoint decoder used to create or further pin a region reference
+ * @pos: interleave position of @cxled in @cxlr
+ *
+ * The attach event is an opportunity to validate CXL decode setup
+ * constraints and record metadata needed for programming HDM decoders,
+ * in particular decoder target lists.
+ *
+ * The steps are:
+ *
+ * - validate that there are no other regions with a higher HPA already
+ * associated with @port
+ * - establish a region reference if one is not already present
+ *
+ * - additionally allocate a decoder instance that will host @cxlr on
+ * @port
+ *
+ * - pin the region reference by the endpoint
+ * - account for how many entries in @port's target list are needed to
+ * cover all of the added endpoints.
+ */
+static int cxl_port_attach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_ref *cxl_rr;
+ bool nr_targets_inc = false;
+ struct cxl_decoder *cxld;
+ unsigned long index;
+ int rc = -EBUSY;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (cxl_rr) {
+ struct cxl_ep *ep_iter;
+ int found = 0;
+
+ /*
+ * Walk the existing endpoints that have been attached to
+ * @cxlr at @port and see if they share the same 'next' port
+		 * in the downstream direction, i.e. endpoints that share a common
+		 * upstream switch.
+ */
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter == ep)
+ continue;
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+
+ /*
+ * New target port, or @port is an endpoint port that always
+ * accounts its own local decode as a target.
+ */
+ if (!found || !ep->next) {
+ cxl_rr->nr_targets++;
+ nr_targets_inc = true;
+ }
+
+ /*
+ * The decoder for @cxlr was allocated when the region was first
+ * attached to @port.
+ */
+ cxld = cxl_rr->decoder;
+ } else {
+ cxl_rr = alloc_region_ref(port, cxlr);
+ if (IS_ERR(cxl_rr)) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to allocate region reference\n",
+ dev_name(&port->dev));
+ return PTR_ERR(cxl_rr);
+ }
+ nr_targets_inc = true;
+
+ if (port == cxled_to_port(cxled))
+ cxld = &cxled->cxld;
+ else
+ cxld = cxl_region_find_decoder(port, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ goto out_erase;
+ }
+
+ if (cxld->region) {
+ dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
+ dev_name(&port->dev), dev_name(&cxld->dev),
+ dev_name(&cxld->region->dev));
+ rc = -EBUSY;
+ goto out_erase;
+ }
+
+ cxl_rr->decoder = cxld;
+ }
+
+ rc = cxl_rr_ep_add(cxl_rr, cxled);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track endpoint %s:%s reference\n",
+ dev_name(&port->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxld->dev));
+ goto out_erase;
+ }
+
+ dev_dbg(&cxlr->dev,
+ "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxled->cxld.dev), pos,
+ ep ? ep->next ? dev_name(ep->next->uport) :
+ dev_name(&cxlmd->dev) :
+ "none",
+ cxl_rr->nr_eps, cxl_rr->nr_targets);
+
+ return 0;
+out_erase:
+ if (nr_targets_inc)
+ cxl_rr->nr_targets--;
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+ return rc;
+}
+
+static void cxl_port_detach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_ep *ep = NULL;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (!cxl_rr)
+ return;
+
+ /*
+ * Endpoint ports do not carry cxl_ep references, and they
+ * never target more than one endpoint by definition
+ */
+ if (cxl_rr->decoder == &cxled->cxld)
+ cxl_rr->nr_eps--;
+ else
+ ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
+ if (ep) {
+ struct cxl_ep *ep_iter;
+ unsigned long index;
+ int found = 0;
+
+ cxl_rr->nr_eps--;
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+ if (!found)
+ cxl_rr->nr_targets--;
+ }
+
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+}
+
+static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+ struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
+ int distance)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled_peer;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_memdev *cxlmd_peer;
+ struct cxl_ep *ep_peer;
+ int pos = cxled->pos;
+
+ /*
+	 * If this position wants to share a dport with the last endpoint mapped,
+ * then that endpoint, at index 'position - distance', must also be
+ * mapped by this dport.
+ */
+ if (pos < distance) {
+ dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxled_peer = p->targets[pos - distance];
+ cxlmd_peer = cxled_to_memdev(cxled_peer);
+ ep_peer = cxl_ep_load(port, cxlmd_peer);
+ if (ep->dport != ep_peer->dport) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
+ dev_name(&cxlmd_peer->dev),
+ dev_name(&cxled_peer->cxld.dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
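+/*
+ * Worked example for check_last_peer() (illustrative): a x4 region
+ * routed through a switch with two downstream ports has
+ * distance = 4 / 2 == 2, so the endpoint at position 2 must share a
+ * dport with the endpoint at position 0, and position 3 with position 1.
+ */
+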
+static int cxl_port_setup_targets(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+ struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_switch_decoder *cxlsd;
+ u16 eig, peig;
+ u8 eiw, peiw;
+
+ /*
+ * While root level decoders support x3, x6, x12, switch level
+ * decoders only support powers of 2 up to x16.
+ */
+ if (!is_power_of_2(cxl_rr->nr_targets)) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets);
+ return -EINVAL;
+ }
+
+ cxlsd = to_cxl_switch_decoder(&cxld->dev);
+ if (cxl_rr->nr_targets_set) {
+ int i, distance;
+
+ distance = p->nr_targets / cxl_rr->nr_targets;
+ for (i = 0; i < cxl_rr->nr_targets_set; i++)
+ if (ep->dport == cxlsd->target[i]) {
+ rc = check_last_peer(cxled, ep, cxl_rr,
+ distance);
+ if (rc)
+ return rc;
+ goto out_target_set;
+ }
+ goto add_target;
+ }
+
+ if (is_cxl_root(parent_port)) {
+ parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+ parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ /*
+ * For purposes of address bit routing, use power-of-2 math for
+ * switch ports.
+ */
+ if (!is_power_of_2(parent_iw))
+ parent_iw /= 3;
+ } else {
+ struct cxl_region_ref *parent_rr;
+ struct cxl_decoder *parent_cxld;
+
+ parent_rr = cxl_rr_load(parent_port, cxlr);
+ parent_cxld = parent_rr->decoder;
+ parent_ig = parent_cxld->interleave_granularity;
+ parent_iw = parent_cxld->interleave_ways;
+ }
+
+ rc = granularity_to_cxl(parent_ig, &peig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_ig);
+ return rc;
+ }
+
+ rc = ways_to_cxl(parent_iw, &peiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_iw);
+ return rc;
+ }
+
+ iw = cxl_rr->nr_targets;
+ rc = ways_to_cxl(iw, &eiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev), iw);
+ return rc;
+ }
+
+ /*
+ * If @parent_port is masking address bits, pick the next unused address
+ * bit to route @port's targets.
+ */
+ if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
+ u32 address_bit = max(peig + peiw, eiw + peig);
+
+ eig = address_bit - eiw + 1;
+ } else {
+ eiw = peiw;
+ eig = peig;
+ }
+
+ rc = cxl_to_granularity(eig, &ig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ 256 << eig);
+ return rc;
+ }
+
+ cxld->interleave_ways = iw;
+ cxld->interleave_granularity = ig;
+ cxld->hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+ dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
+ dev_name(&port->dev), iw, ig);
+add_target:
+ if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: targets full trying to add %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ inc = 1;
+out_target_set:
+ cxl_rr->nr_targets_set += inc;
+ dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+
+ return 0;
+}
+
+static void cxl_port_reset_targets(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_decoder *cxld;
+
+ /*
+	 * After the last endpoint has been detached, the entire cxl_rr may now
+ * be gone.
+ */
+ if (!cxl_rr)
+ return;
+ cxl_rr->nr_targets_set = 0;
+
+ cxld = cxl_rr->decoder;
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+}
+
+static void cxl_region_teardown_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
+ cxl_port_reset_targets(iter, cxlr);
+ }
+}
+
+static int cxl_region_setup_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i, rc;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ /*
+ * Descend the topology tree programming targets while
+ * looking for conflicts.
+ */
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ rc = cxl_port_setup_targets(iter, cxlr, cxled);
+ if (rc) {
+ cxl_region_teardown_targets(cxlr);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_region_attach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *ep_port, *root_port, *iter;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_dport *dport;
+ int i, rc = -ENXIO;
+
+ if (cxled->mode == CXL_DECODER_DEAD) {
+ dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
+ return -ENODEV;
+ }
+
+	/* already full of members, or interleave config not established? */
+ if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "region already active\n");
+ return -EBUSY;
+ } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "interleave config missing\n");
+ return -ENXIO;
+ }
+
+ if (pos < 0 || pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ return -ENXIO;
+ }
+
+ if (p->targets[pos] == cxled)
+ return 0;
+
+ if (p->targets[pos]) {
+ struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
+ struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
+
+ dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
+ pos, dev_name(&cxlmd_target->dev),
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+
+ for (i = 0; i < p->interleave_ways; i++) {
+ struct cxl_endpoint_decoder *cxled_target;
+ struct cxl_memdev *cxlmd_target;
+
+ cxled_target = p->targets[pos];
+ if (!cxled_target)
+ continue;
+
+ cxlmd_target = cxled_to_memdev(cxled_target);
+ if (cxlmd_target == cxlmd) {
+ dev_dbg(&cxlr->dev,
+ "%s already specified at position %d via: %s\n",
+ dev_name(&cxlmd->dev), pos,
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+ }
+
+ ep_port = cxled_to_port(cxled);
+ root_port = cxlrd_to_port(cxlrd);
+ dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+ if (!dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(cxlr->dev.parent));
+ return -ENXIO;
+ }
+
+ if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+ return -ENXIO;
+ }
+
+ if (cxled->cxld.target_type != cxlr->type) {
+ dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->cxld.target_type, cxlr->type);
+ return -ENXIO;
+ }
+
+ if (!cxled->dpa_res) {
+ dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
+ return -ENXIO;
+ }
+
+ if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+ resource_size(p->res)) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ (u64)resource_size(cxled->dpa_res), p->interleave_ways,
+ (u64)resource_size(p->res));
+ return -EINVAL;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
+ if (rc)
+ goto err;
+ }
+
+ p->targets[pos] = cxled;
+ cxled->pos = pos;
+ p->nr_targets++;
+
+ if (p->nr_targets == p->interleave_ways) {
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+ goto err_decrement;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ cxled->cxld.interleave_ways = p->interleave_ways;
+ cxled->cxld.interleave_granularity = p->interleave_granularity;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+
+ return 0;
+
+err_decrement:
+ p->nr_targets--;
+err:
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+ return rc;
+}
+
+static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
+ struct cxl_region *cxlr = cxled->cxld.region;
+ struct cxl_region_params *p;
+ int rc = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!cxlr)
+ return 0;
+
+ p = &cxlr->params;
+ get_device(&cxlr->dev);
+
+ if (p->state > CXL_CONFIG_ACTIVE) {
+ /*
+ * TODO: tear down all impacted regions if a device is
+ * removed out of order
+ */
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ if (rc)
+ goto out;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+
+ if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
+ p->targets[cxled->pos] != cxled) {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->pos);
+ goto out;
+ }
+
+ if (p->state == CXL_CONFIG_ACTIVE) {
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+ cxl_region_teardown_targets(cxlr);
+ }
+ p->targets[cxled->pos] = NULL;
+ p->nr_targets--;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+
+ /* notify the region driver that one of its targets has departed */
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+out:
+ put_device(&cxlr->dev);
+ return rc;
+}
+
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+ down_write(&cxl_region_rwsem);
+ cxled->mode = CXL_DECODER_DEAD;
+ cxl_region_detach(cxled);
+ up_write(&cxl_region_rwsem);
+}
+
+static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
+{
+ struct device *dev;
+ int rc;
+
+ dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder);
+ if (!dev)
+ return -ENODEV;
+
+ if (!is_endpoint_decoder(dev)) {
+ put_device(dev);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ goto out;
+ down_read(&cxl_dpa_rwsem);
+ rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
+ up_read(&cxl_dpa_rwsem);
+ up_write(&cxl_region_rwsem);
+out:
+ put_device(dev);
+ return rc;
+}
+
+static int detach_target(struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!p->targets[pos]) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = cxl_region_detach(p->targets[pos]);
+out:
+ up_write(&cxl_region_rwsem);
+ return rc;
+}
+
+static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
+ size_t len)
+{
+ int rc;
+
+ if (sysfs_streq(buf, "\n"))
+ rc = detach_target(cxlr, pos);
+ else
+ rc = attach_target(cxlr, buf, pos);
+
+ if (rc < 0)
+ return rc;
+ return len;
+}
+
+#define TARGET_ATTR_RW(n) \
+static ssize_t target##n##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ return show_targetN(to_cxl_region(dev), buf, (n)); \
+} \
+static ssize_t target##n##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_targetN(to_cxl_region(dev), buf, (n), len); \
+} \
+static DEVICE_ATTR_RW(target##n)
+
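+/*
+ * TARGET_ATTR_RW(0) below expands to (illustrative):
+ *
+ *   static ssize_t target0_show(struct device *dev,
+ *                               struct device_attribute *attr, char *buf)
+ *   {
+ *           return show_targetN(to_cxl_region(dev), buf, (0));
+ *   }
+ *
+ * ...plus the matching target0_store(), tied together by
+ * DEVICE_ATTR_RW(target0).
+ */
+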
+TARGET_ATTR_RW(0);
+TARGET_ATTR_RW(1);
+TARGET_ATTR_RW(2);
+TARGET_ATTR_RW(3);
+TARGET_ATTR_RW(4);
+TARGET_ATTR_RW(5);
+TARGET_ATTR_RW(6);
+TARGET_ATTR_RW(7);
+TARGET_ATTR_RW(8);
+TARGET_ATTR_RW(9);
+TARGET_ATTR_RW(10);
+TARGET_ATTR_RW(11);
+TARGET_ATTR_RW(12);
+TARGET_ATTR_RW(13);
+TARGET_ATTR_RW(14);
+TARGET_ATTR_RW(15);
+
+static struct attribute *target_attrs[] = {
+ &dev_attr_target0.attr,
+ &dev_attr_target1.attr,
+ &dev_attr_target2.attr,
+ &dev_attr_target3.attr,
+ &dev_attr_target4.attr,
+ &dev_attr_target5.attr,
+ &dev_attr_target6.attr,
+ &dev_attr_target7.attr,
+ &dev_attr_target8.attr,
+ &dev_attr_target9.attr,
+ &dev_attr_target10.attr,
+ &dev_attr_target11.attr,
+ &dev_attr_target12.attr,
+ &dev_attr_target13.attr,
+ &dev_attr_target14.attr,
+ &dev_attr_target15.attr,
+ NULL,
+};
+
+static umode_t cxl_region_target_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (n < p->interleave_ways)
+ return a->mode;
+ return 0;
+}
+
+static const struct attribute_group cxl_region_target_group = {
+ .attrs = target_attrs,
+ .is_visible = cxl_region_target_visible,
+};
+
+static const struct attribute_group *get_cxl_region_target_group(void)
+{
+ return &cxl_region_target_group;
+}
+
+static const struct attribute_group *region_groups[] = {
+ &cxl_base_attribute_group,
+ &cxl_region_group,
+ &cxl_region_target_group,
+ NULL,
+};
+
+static void cxl_region_release(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ memregion_free(cxlr->id);
+ kfree(cxlr);
+}
+
+const struct device_type cxl_region_type = {
+ .name = "cxl_region",
+ .release = cxl_region_release,
+ .groups = region_groups
+};
+
+bool is_cxl_region(struct device *dev)
+{
+ return dev->type == &cxl_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
+
+static struct cxl_region *to_cxl_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
+ "not a cxl_region device\n"))
+ return NULL;
+
+ return container_of(dev, struct cxl_region, dev);
+}
+
+static void unregister_region(void *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ device_del(dev);
+ cxl_region_iomem_release(cxlr);
+ put_device(dev);
+}
+
+static struct lock_class_key cxl_region_key;
+
+static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
+{
+ struct cxl_region *cxlr;
+ struct device *dev;
+
+ cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
+ if (!cxlr) {
+ memregion_free(id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = &cxlr->dev;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_region_key);
+ dev->parent = &cxlrd->cxlsd.cxld.dev;
+ device_set_pm_not_required(dev);
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_region_type;
+ cxlr->id = id;
+
+ return cxlr;
+}
+
+/**
+ * devm_cxl_add_region - Adds a region to a decoder
+ * @cxlrd: root decoder
+ * @id: memregion id to create; released via memregion_free() on failure
+ * @mode: mode for the endpoint decoders of this region
+ * @type: select whether this is an expander or accelerator (type-2 or type-3)
+ *
+ * This is the second step of region initialization. Regions exist within an
+ * address space which is mapped by a @cxlrd.
+ *
+ * Return: a new region on success, or an ERR_PTR() encoded error on failure.
+ * The region will be named "regionZ" where Z is the unique region number.
+ */
+static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
+ int id,
+ enum cxl_decoder_mode mode,
+ enum cxl_decoder_type type)
+{
+ struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+ struct cxl_region *cxlr;
+ struct device *dev;
+ int rc;
+
+ cxlr = cxl_region_alloc(cxlrd, id);
+ if (IS_ERR(cxlr))
+ return cxlr;
+ cxlr->mode = mode;
+ cxlr->type = type;
+
+ dev = &cxlr->dev;
+ rc = dev_set_name(dev, "region%d", id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_dbg(port->uport, "%s: created %s\n",
+ dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
+ return cxlr;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+
+static ssize_t create_pmem_region_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
+}
+
+static ssize_t create_pmem_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_region *cxlr;
+ int id, rc;
+
+ rc = sscanf(buf, "region%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+
+ if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
+ memregion_free(rc);
+ return -EBUSY;
+ }
+
+ cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
+ CXL_DECODER_EXPANDER);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ return len;
+}
+DEVICE_ATTR_RW(create_pmem_region);
+
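+/*
+ * Usage sketch (illustrative paths): reading create_pmem_region
+ * advertises the next region name; writing that exact name back
+ * atomically claims the id and instantiates the region device:
+ *
+ *   $ cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region
+ *   region0
+ *   $ echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
+ */
+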
+static ssize_t region_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (cxld->region)
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ else
+ rc = sysfs_emit(buf, "\n");
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+DEVICE_ATTR_RO(region);
+
+static struct cxl_region *
+cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct device *region_dev;
+
+ region_dev = device_find_child_by_name(&cxld->dev, name);
+ if (!region_dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_cxl_region(region_dev);
+}
+
+static ssize_t delete_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_region *cxlr;
+
+ cxlr = cxl_find_region_by_name(cxlrd, buf);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ devm_release_action(port->uport, unregister_region, cxlr);
+ put_device(&cxlr->dev);
+
+ return len;
+}
+DEVICE_ATTR_WO(delete_region);
+
+static void cxl_pmem_region_release(struct device *dev)
+{
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ int i;
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
+
+ put_device(&cxlmd->dev);
+ }
+
+ kfree(cxlr_pmem);
+}
+
+static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_pmem_region_type = {
+ .name = "cxl_pmem_region",
+ .release = cxl_pmem_region_release,
+ .groups = cxl_pmem_region_attribute_groups,
+};
+
+bool is_cxl_pmem_region(struct device *dev)
+{
+ return dev->type == &cxl_pmem_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
+
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
+ "not a cxl_pmem_region device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_pmem_region, dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+
+static struct lock_class_key cxl_pmem_region_key;
+
+static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int i;
+
+ down_read(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT) {
+ cxlr_pmem = ERR_PTR(-ENXIO);
+ goto out;
+ }
+
+ cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
+ GFP_KERNEL);
+ if (!cxlr_pmem) {
+ cxlr_pmem = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ cxlr_pmem->hpa_range.start = p->res->start;
+ cxlr_pmem->hpa_range.end = p->res->end;
+
+ /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ cxlr_pmem->nr_mappings = p->nr_targets;
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+
+ m->cxlmd = cxlmd;
+ get_device(&cxlmd->dev);
+ m->start = cxled->dpa_res->start;
+ m->size = resource_size(cxled->dpa_res);
+ m->position = i;
+ }
+
+ dev = &cxlr_pmem->dev;
+ cxlr_pmem->cxlr = cxlr;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlr->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_pmem_region_type;
+out:
+ up_read(&cxl_region_rwsem);
+
+ return cxlr_pmem;
+}
+
+static void cxlr_pmem_unregister(void *dev)
+{
+ device_unregister(dev);
+}
+
+/**
+ * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
+ * @cxlr: parent CXL region for this pmem region bridge device
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int rc;
+
+ cxlr_pmem = cxl_pmem_region_alloc(cxlr);
+ if (IS_ERR(cxlr_pmem))
+ return PTR_ERR(cxlr_pmem);
+
+ dev = &cxlr_pmem->dev;
+ rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "probe interrupted\n");
+ return rc;
+ }
+
+ if (p->state < CXL_CONFIG_COMMIT) {
+ dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
+ rc = -ENXIO;
+ }
+
+ /*
+ * From this point on, any path that changes the region's state away from
+ * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
+ */
+ up_read(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+
+ switch (cxlr->mode) {
+ case CXL_DECODER_PMEM:
+ return devm_cxl_add_pmem_region(cxlr);
+ default:
+ dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
+ cxlr->mode);
+ return -ENXIO;
+ }
+}
+
+static struct cxl_driver cxl_region_driver = {
+ .name = "cxl_region",
+ .probe = cxl_region_probe,
+ .id = CXL_DEVICE_REGION,
+};
+
+int cxl_region_init(void)
+{
+ return cxl_driver_register(&cxl_region_driver);
+}
+
+void cxl_region_exit(void)
+{
+ cxl_driver_unregister(&cxl_region_driver);
+}
+
+MODULE_IMPORT_NS(CXL);
+MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6799b27c7db2..f680450f0b16 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -7,6 +7,7 @@
#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/log2.h>
#include <linux/io.h>
/**
@@ -53,9 +54,12 @@
#define CXL_HDM_DECODER0_CTRL_LOCK BIT(8)
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
+#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
+#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
+#define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i)
static inline int cxl_hdm_decoder_count(u32 cap_hdr)
{
@@ -64,6 +68,57 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr)
return val ? val * 2 : 1;
}
+/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
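+/* Example: ig=2 decodes to an interleave granularity of 256 << 2 = 1024 bytes */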
+static inline int cxl_to_granularity(u16 ig, unsigned int *val)
+{
+ if (ig > 6)
+ return -EINVAL;
+ *val = 256 << ig;
+ return 0;
+}
+
+/* Encoding defined in the CXL ECN "3, 6, 12 and 16-way memory Interleaving" */
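+/* eniw 0..4 decode to 1, 2, 4, 8, 16 ways; eniw 8..10 decode to 3, 6, 12 ways */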
+static inline int cxl_to_ways(u8 eniw, unsigned int *val)
+{
+ switch (eniw) {
+ case 0 ... 4:
+ *val = 1 << eniw;
+ break;
+ case 8 ... 10:
+ *val = 3 << (eniw - 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
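+/*
+ * Inverse of cxl_to_granularity(): e.g. g=1024 encodes to
+ * ig = ilog2(1024) - 8 = 2.
+ */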
+static inline int granularity_to_cxl(int g, u16 *ig)
+{
+ if (g > SZ_16K || g < 256 || !is_power_of_2(g))
+ return -EINVAL;
+ *ig = ilog2(g) - 8;
+ return 0;
+}
+
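+/*
+ * Inverse of cxl_to_ways(): e.g. 12-way interleave is not a power of 2,
+ * but 12 / 3 = 4 is, so it encodes to iw = ilog2(4) + 8 = 10.
+ */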
+static inline int ways_to_cxl(unsigned int ways, u8 *iw)
+{
+ if (ways > 16)
+ return -EINVAL;
+ if (is_power_of_2(ways)) {
+ *iw = ilog2(ways);
+ return 0;
+ }
+ if (ways % 3)
+ return -EINVAL;
+ ways /= 3;
+ if (!is_power_of_2(ways))
+ return -EINVAL;
+ *iw = ilog2(ways) + 8;
+ return 0;
+}
+
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -193,31 +248,77 @@ enum cxl_decoder_type {
*/
#define CXL_DECODER_MAX_INTERLEAVE 16
+#define CXL_DECODER_MIN_GRANULARITY 256
+
/**
- * struct cxl_decoder - CXL address range decode configuration
+ * struct cxl_decoder - Common CXL HDM Decoder Attributes
* @dev: this decoder's device
* @id: kernel device name id
- * @platform_res: address space resources considered by root decoder
- * @decoder_range: address space resources considered by midlevel decoder
+ * @hpa_range: Host physical address range mapped by this decoder
* @interleave_ways: number of cxl_dports in this decode
* @interleave_granularity: data stride per dport
* @target_type: accelerator vs expander (type2 vs type3) selector
+ * @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
- * @target_lock: coordinate coherent reads of the target list
- * @nr_targets: number of elements in @target
- * @target: active ordered target list in current decoder configuration
- */
+ * @commit: device/decoder-type specific callback to commit settings to hw
+ * @reset: device/decoder-type specific callback to reset hw settings
+ */
struct cxl_decoder {
struct device dev;
int id;
- union {
- struct resource platform_res;
- struct range decoder_range;
- };
+ struct range hpa_range;
int interleave_ways;
int interleave_granularity;
enum cxl_decoder_type target_type;
+ struct cxl_region *region;
unsigned long flags;
+ int (*commit)(struct cxl_decoder *cxld);
+ int (*reset)(struct cxl_decoder *cxld);
+};
+
+/*
+ * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
+ * while cxld_unregister() is running
+ */
+enum cxl_decoder_mode {
+ CXL_DECODER_NONE,
+ CXL_DECODER_RAM,
+ CXL_DECODER_PMEM,
+ CXL_DECODER_MIXED,
+ CXL_DECODER_DEAD,
+};
+
+/**
+ * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder
+ * @cxld: base cxl_decoder_object
+ * @dpa_res: actively claimed DPA span of this decoder
+ * @skip: offset into @dpa_res where @cxld.hpa_range maps
+ * @mode: which memory type / access-mode-partition this decoder targets
+ * @pos: interleave position in @cxld.region
+ */
+struct cxl_endpoint_decoder {
+ struct cxl_decoder cxld;
+ struct resource *dpa_res;
+ resource_size_t skip;
+ enum cxl_decoder_mode mode;
+ int pos;
+};
+
+/**
+ * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
+ * @cxld: base cxl_decoder object
+ * @target_lock: coordinate coherent reads of the target list
+ * @nr_targets: number of elements in @target
+ * @target: active ordered target list in current decoder configuration
+ *
+ * The 'switch' decoder type represents the decoder instances of cxl_ports that
+ * route from the root of a CXL memory decode topology to the endpoints. They
+ * come in two flavors, root-level decoders, statically defined by platform
+ * firmware, and mid-level decoders, where interleave-granularity,
+ * interleave-width, and the target list are mutable.
+ */
+struct cxl_switch_decoder {
+ struct cxl_decoder cxld;
seqlock_t target_lock;
int nr_targets;
struct cxl_dport *target[];
@@ -225,6 +326,76 @@ struct cxl_decoder {
/**
+ * struct cxl_root_decoder - Static platform CXL address decoder
+ * @res: host / parent resource for region allocations
+ * @region_id: region id for next region provisioning event
+ * @calc_hb: which host bridge covers the n'th position by granularity
+ * @cxlsd: base cxl switch decoder
+ */
+struct cxl_root_decoder {
+ struct resource *res;
+ atomic_t region_id;
+ struct cxl_dport *(*calc_hb)(struct cxl_root_decoder *cxlrd, int pos);
+ struct cxl_switch_decoder cxlsd;
+};
+
+/*
+ * enum cxl_config_state - State machine for region configuration
+ * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely
+ * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more
+ * changes to interleave_ways or interleave_granularity
+ * @CXL_CONFIG_ACTIVE: All targets have been added; the region is now
+ * active
+ * @CXL_CONFIG_RESET_PENDING: see commit_store()
+ * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware
+ */
+enum cxl_config_state {
+ CXL_CONFIG_IDLE,
+ CXL_CONFIG_INTERLEAVE_ACTIVE,
+ CXL_CONFIG_ACTIVE,
+ CXL_CONFIG_RESET_PENDING,
+ CXL_CONFIG_COMMIT,
+};
+
+/**
+ * struct cxl_region_params - region settings
+ * @state: allow the driver to lockdown further parameter changes
+ * @uuid: unique id for persistent regions
+ * @interleave_ways: number of endpoints in the region
+ * @interleave_granularity: capacity each endpoint contributes to a stripe
+ * @res: allocated iomem capacity for this region
+ * @targets: active ordered targets in current decoder configuration
+ * @nr_targets: number of targets
+ *
+ * State transitions are protected by the cxl_region_rwsem
+ */
+struct cxl_region_params {
+ enum cxl_config_state state;
+ uuid_t uuid;
+ int interleave_ways;
+ int interleave_granularity;
+ struct resource *res;
+ struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
+ int nr_targets;
+};
+
+/**
+ * struct cxl_region - CXL region
+ * @dev: This region's device
+ * @id: This region's id. Id is globally unique across all regions
+ * @mode: Endpoint decoder allocation / access mode
+ * @type: Endpoint decoder target type
+ * @params: active + config params for the region
+ */
+struct cxl_region {
+ struct device dev;
+ int id;
+ enum cxl_decoder_mode mode;
+ enum cxl_decoder_type type;
+ struct cxl_region_params params;
+};
+
+/**
* enum cxl_nvdimm_brige_state - state machine for managing bus rescans
* @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
 * @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
@@ -251,7 +422,26 @@ struct cxl_nvdimm_bridge {
struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
- struct nvdimm *nvdimm;
+ struct cxl_nvdimm_bridge *bridge;
+ struct cxl_pmem_region *region;
+};
+
+struct cxl_pmem_region_mapping {
+ struct cxl_memdev *cxlmd;
+ struct cxl_nvdimm *cxl_nvd;
+ u64 start;
+ u64 size;
+ int position;
+};
+
+struct cxl_pmem_region {
+ struct device dev;
+ struct cxl_region *cxlr;
+ struct nd_region *nd_region;
+ struct cxl_nvdimm_bridge *bridge;
+ struct range hpa_range;
+ int nr_mappings;
+ struct cxl_pmem_region_mapping mapping[];
};
/**
@@ -260,50 +450,94 @@ struct cxl_nvdimm {
* decode hierarchy.
* @dev: this port's device
* @uport: PCI or platform device implementing the upstream port capability
+ * @host_bridge: Shortcut to the platform attach point for this port
* @id: id for port device-name
* @dports: cxl_dport instances referenced by decoders
* @endpoints: cxl_ep instances, endpoints that are a descendant of this port
+ * @regions: cxl_region_ref instances, regions mapped by this port
+ * @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
+ * @hdm_end: track last allocated HDM decoder instance for allocation ordering
+ * @commit_end: cursor to track highest committed decoder for commit ordering
* @component_reg_phys: component register capability base address (optional)
* @dead: last ep has been removed, force port re-creation
* @depth: How deep this port is relative to the root. depth 0 is the root.
+ * @cdat: Cached CDAT data
+ * @cdat_available: Should a CDAT attribute be available in sysfs
*/
struct cxl_port {
struct device dev;
struct device *uport;
+ struct device *host_bridge;
int id;
- struct list_head dports;
- struct list_head endpoints;
+ struct xarray dports;
+ struct xarray endpoints;
+ struct xarray regions;
+ struct cxl_dport *parent_dport;
struct ida decoder_ida;
+ int hdm_end;
+ int commit_end;
resource_size_t component_reg_phys;
bool dead;
unsigned int depth;
+ struct cxl_cdat {
+ void *table;
+ size_t length;
+ } cdat;
+ bool cdat_available;
};
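+/*
+ * The dports xarray is keyed by the dport's device pointer, hence the
+ * (unsigned long) cast in this lookup helper.
+ */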
+static inline struct cxl_dport *
+cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
+{
+ return xa_load(&port->dports, (unsigned long)dport_dev);
+}
+
/**
* struct cxl_dport - CXL downstream port
* @dport: PCI bridge or firmware device representing the downstream link
* @port_id: unique hardware identifier for dport in decoder target list
* @component_reg_phys: downstream port component registers
* @port: reference to cxl_port that contains this downstream port
- * @list: node for a cxl_port's list of cxl_dport instances
*/
struct cxl_dport {
struct device *dport;
int port_id;
resource_size_t component_reg_phys;
struct cxl_port *port;
- struct list_head list;
};
/**
* struct cxl_ep - track an endpoint's interest in a port
* @ep: device that hosts a generic CXL endpoint (expander or accelerator)
- * @list: node on port->endpoints list
+ * @dport: which dport routes to this endpoint on @port
+ * @next: cxl switch port across the link attached to @dport, NULL if
+ * attached to an endpoint
*/
struct cxl_ep {
struct device *ep;
- struct list_head list;
+ struct cxl_dport *dport;
+ struct cxl_port *next;
+};
+
+/**
+ * struct cxl_region_ref - track a region's interest in a port
+ * @port: point in topology to install this reference
+ * @decoder: decoder assigned for @region in @port
+ * @region: region for this reference
+ * @endpoints: cxl_ep references for region members beneath @port
+ * @nr_targets_set: track how many targets have been programmed during setup
+ * @nr_eps: number of endpoints beneath @port
+ * @nr_targets: number of distinct targets needed to reach @nr_eps
+ */
+struct cxl_region_ref {
+ struct cxl_port *port;
+ struct cxl_decoder *decoder;
+ struct cxl_region *region;
+ struct xarray endpoints;
+ int nr_targets_set;
+ int nr_eps;
+ int nr_targets;
};
/*
@@ -325,29 +559,31 @@ int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port);
+ struct cxl_dport *parent_dport);
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct device *dev);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
int cxl_bus_rescan(void);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd);
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport);
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
struct device *dport, int port_id,
resource_size_t component_reg_phys);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev);
struct cxl_decoder *to_cxl_decoder(struct device *dev);
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev);
-bool is_cxl_decoder(struct device *dev);
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
@@ -357,6 +593,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+bool is_cxl_region(struct device *dev);
+
extern struct bus_type cxl_bus_type;
struct cxl_driver {
@@ -385,6 +623,8 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
#define CXL_DEVICE_PORT 3
#define CXL_DEVICE_ROOT 4
#define CXL_DEVICE_MEMORY_EXPANDER 5
+#define CXL_DEVICE_REGION 6
+#define CXL_DEVICE_PMEM_REGION 7
#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
@@ -396,7 +636,21 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+
+#ifdef CONFIG_CXL_REGION
+bool is_cxl_pmem_region(struct device *dev);
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
+#else
+static inline bool is_cxl_pmem_region(struct device *dev)
+{
+ return false;
+}
+static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ return NULL;
+}
+#endif
/*
* Unit test builds overrides this to __weak, find the 'strong' version
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 7df0b053373a..88e3a8e54b6a 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -50,6 +50,24 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
return container_of(dev, struct cxl_memdev, dev);
}
+static inline struct cxl_port *cxled_to_port(struct cxl_endpoint_decoder *cxled)
+{
+ return to_cxl_port(cxled->cxld.dev.parent);
+}
+
+static inline struct cxl_port *cxlrd_to_port(struct cxl_root_decoder *cxlrd)
+{
+ return to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+}
+
+static inline struct cxl_memdev *
+cxled_to_memdev(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = to_cxl_port(cxled->cxld.dev.parent);
+
+ return to_cxl_memdev(port->uport);
+}
+
bool is_cxl_memdev(struct device *dev);
static inline bool is_cxl_endpoint(struct cxl_port *port)
{
@@ -178,8 +196,9 @@ struct cxl_endpoint_dvsec_info {
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
- * @pmem_range: Active Persistent memory capacity configuration
- * @ram_range: Active Volatile memory capacity configuration
+ * @dpa_res: Overall DPA resource tree for the device
+ * @pmem_res: Active Persistent memory capacity configuration
+ * @ram_res: Active Volatile memory capacity configuration
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
@@ -191,6 +210,7 @@ struct cxl_endpoint_dvsec_info {
* @component_reg_phys: register base of component registers
* @info: Cached DVSEC information about the device.
* @serial: PCIe Device Serial Number
+ * @doe_mbs: PCI DOE mailbox array
* @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
@@ -209,8 +229,9 @@ struct cxl_dev_state {
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
- struct range pmem_range;
- struct range ram_range;
+ struct resource dpa_res;
+ struct resource pmem_res;
+ struct resource ram_res;
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
@@ -224,6 +245,8 @@ struct cxl_dev_state {
resource_size_t component_reg_phys;
u64 serial;
+ struct xarray doe_mbs;
+
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
@@ -299,6 +322,13 @@ struct cxl_mbox_identify {
u8 qos_telemetry_caps;
} __packed;
+struct cxl_mbox_get_partition_info {
+ __le64 active_volatile_cap;
+ __le64 active_persistent_cap;
+ __le64 next_volatile_cap;
+ __le64 next_persistent_cap;
+} __packed;
+
struct cxl_mbox_get_lsa {
__le32 offset;
__le32 length;
@@ -370,4 +400,8 @@ struct cxl_hdm {
unsigned int interleave_mask;
struct cxl_port *port;
};
+
+struct seq_file;
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds);
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index fce1c11729c2..eec597dbe763 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -74,4 +74,5 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
+void read_cdat_data(struct cxl_port *port);
#endif /* __CXL_PCI_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index a979d0b484d5..64ccf053d32c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -24,42 +25,32 @@
* in higher level operations.
*/
-static int create_endpoint(struct cxl_memdev *cxlmd,
- struct cxl_port *parent_port)
+static void enable_suspend(void *data)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_port *endpoint;
- int rc;
+ cxl_mem_active_dec();
+}
- endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
- cxlds->component_reg_phys, parent_port);
- if (IS_ERR(endpoint))
- return PTR_ERR(endpoint);
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
- dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+static int cxl_mem_dpa_show(struct seq_file *file, void *data)
+{
+ struct device *dev = file->private;
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- rc = cxl_endpoint_autoremove(cxlmd, endpoint);
- if (rc)
- return rc;
-
- if (!endpoint->dev.driver) {
- dev_err(&cxlmd->dev, "%s failed probe\n",
- dev_name(&endpoint->dev));
- return -ENXIO;
- }
+ cxl_dpa_debug(file, cxlmd->cxlds);
return 0;
}
-static void enable_suspend(void *data)
-{
- cxl_mem_active_dec();
-}
-
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_port *parent_port;
+ struct cxl_dport *dport;
+ struct dentry *dentry;
int rc;
/*
@@ -73,11 +64,17 @@ static int cxl_mem_probe(struct device *dev)
if (work_pending(&cxlmd->detach_work))
return -EBUSY;
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+
rc = devm_cxl_enumerate_ports(cxlmd);
if (rc)
return rc;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, &dport);
if (!parent_port) {
dev_err(dev, "CXL port topology not found\n");
return -ENXIO;
@@ -91,7 +88,7 @@ static int cxl_mem_probe(struct device *dev)
goto unlock;
}
- rc = create_endpoint(cxlmd, parent_port);
+ rc = devm_cxl_add_endpoint(cxlmd, dport);
unlock:
device_unlock(&parent_port->dev);
put_device(&parent_port->dev);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 5a0ae46d4989..faeb5d9d7a7a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
@@ -386,6 +387,47 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
return rc;
}
+static void cxl_pci_destroy_doe(void *mbs)
+{
+ xa_destroy(mbs);
+}
+
+static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 off = 0;
+
+ xa_init(&cxlds->doe_mbs);
+ if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
+ dev_err(dev, "Failed to create XArray for DOE's\n");
+ return;
+ }
+
+ /*
+ * Mailbox creation is best effort. Higher layers must determine if
+ * the lack of a mailbox for their protocol is a device failure or not.
+ */
+ pci_doe_for_each_off(pdev, off) {
+ struct pci_doe_mb *doe_mb;
+
+ doe_mb = pcim_doe_create_mb(pdev, off);
+ if (IS_ERR(doe_mb)) {
+ dev_err(dev, "Failed to create MB object for MB @ %x\n",
+ off);
+ continue;
+ }
+
+ if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
+ dev_err(dev, "xa_insert failed to insert MB @ %x\n",
+ off);
+ continue;
+ }
+
+ dev_dbg(dev, "Created DOE mailbox @%x\n", off);
+ }
+}
+
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
@@ -434,6 +476,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);
+ devm_cxl_pci_create_doe(cxlds);
+
rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
@@ -454,7 +498,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0aaa70b4e0f7..7dc0a2fa1a6b 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -7,6 +7,7 @@
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"
@@ -26,7 +27,23 @@ static void clear_exclusive(void *cxlds)
static void unregister_nvdimm(void *nvdimm)
{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
+ struct cxl_pmem_region *cxlr_pmem;
+
+ device_lock(&cxl_nvb->dev);
+ cxlr_pmem = cxl_nvd->region;
+ dev_set_drvdata(&cxl_nvd->dev, NULL);
+ cxl_nvd->region = NULL;
+ device_unlock(&cxl_nvb->dev);
+
+ if (cxlr_pmem) {
+ device_release_driver(&cxlr_pmem->dev);
+ put_device(&cxlr_pmem->dev);
+ }
+
nvdimm_delete(nvdimm);
+ cxl_nvd->bridge = NULL;
}
static int cxl_nvdimm_probe(struct device *dev)
@@ -39,7 +56,7 @@ static int cxl_nvdimm_probe(struct device *dev)
struct nvdimm *nvdimm;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
+ cxl_nvb = cxl_find_nvdimm_bridge(dev);
if (!cxl_nvb)
return -ENXIO;
@@ -66,6 +83,7 @@ static int cxl_nvdimm_probe(struct device *dev)
}
dev_set_drvdata(dev, nvdimm);
+ cxl_nvd->bridge = cxl_nvb;
rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
device_unlock(&cxl_nvb->dev);
@@ -204,15 +222,38 @@ static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
return cxl_nvb->nvdimm_bus != NULL;
}
-static int cxl_nvdimm_release_driver(struct device *dev, void *data)
+static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
+ struct cxl_nvdimm *cxl_nvd;
+
if (!is_cxl_nvdimm(dev))
return 0;
+
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->bridge != cxl_nvb)
+ return 0;
+
device_release_driver(dev);
return 0;
}
-static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
+static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+
+ if (!is_cxl_pmem_region(dev))
+ return 0;
+
+ cxlr_pmem = to_cxl_pmem_region(dev);
+ if (cxlr_pmem->bridge != cxl_nvb)
+ return 0;
+
+ device_release_driver(dev);
+ return 0;
+}
+
+static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
+ struct nvdimm_bus *nvdimm_bus)
{
if (!nvdimm_bus)
return;
@@ -222,7 +263,10 @@ static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
* nvdimm_bus_unregister() rips the nvdimm objects out from
* underneath them.
*/
- bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_pmem_region_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_nvdimm_release_driver);
nvdimm_bus_unregister(nvdimm_bus);
}
@@ -260,7 +304,7 @@ static void cxl_nvb_update_state(struct work_struct *work)
dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
}
- offline_nvdimm_bus(victim_bus);
+ offline_nvdimm_bus(cxl_nvb, victim_bus);
put_device(&cxl_nvb->dev);
}
@@ -315,6 +359,203 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
+static int match_cxl_nvdimm(struct device *dev, void *data)
+{
+ return is_cxl_nvdimm(dev);
+}
+
+static void unregister_nvdimm_region(void *nd_region)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct cxl_pmem_region *cxlr_pmem;
+ int i;
+
+ cxlr_pmem = nd_region_provider_data(nd_region);
+ cxl_nvb = cxlr_pmem->bridge;
+ device_lock(&cxl_nvb->dev);
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;
+
+ if (cxl_nvd->region) {
+ put_device(&cxlr_pmem->dev);
+ cxl_nvd->region = NULL;
+ }
+ }
+ device_unlock(&cxl_nvb->dev);
+
+ nvdimm_region_delete(nd_region);
+}
+
+static void cxlr_pmem_remove_resource(void *res)
+{
+ remove_resource(res);
+}
+
+struct cxl_pmem_region_info {
+ u64 offset;
+ u64 serial;
+};
+
+static int cxl_pmem_region_probe(struct device *dev)
+{
+ struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ struct cxl_region *cxlr = cxlr_pmem->cxlr;
+ struct cxl_pmem_region_info *info = NULL;
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct nd_interleave_set *nd_set;
+ struct nd_region_desc ndr_desc;
+ struct cxl_nvdimm *cxl_nvd;
+ struct nvdimm *nvdimm;
+ struct resource *res;
+ int rc, i = 0;
+
+ cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
+ if (!cxl_nvb) {
+ dev_dbg(dev, "bridge not found\n");
+ return -ENXIO;
+ }
+ cxlr_pmem->bridge = cxl_nvb;
+
+ device_lock(&cxl_nvb->dev);
+ if (!cxl_nvb->nvdimm_bus) {
+ dev_dbg(dev, "nvdimm bus not found\n");
+ rc = -ENXIO;
+ goto err;
+ }
+
+ memset(&mappings, 0, sizeof(mappings));
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ res->name = "Persistent Memory";
+ res->start = cxlr_pmem->hpa_range.start;
+ res->end = cxlr_pmem->hpa_range.end;
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_PERSISTENT_MEMORY;
+
+ rc = insert_resource(&iomem_resource, res);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
+ if (rc)
+ goto err;
+
+ ndr_desc.res = res;
+ ndr_desc.provider_data = cxlr_pmem;
+
+ ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
+ ndr_desc.target_node = phys_to_target_node(res->start);
+ if (ndr_desc.target_node == NUMA_NO_NODE) {
+ ndr_desc.target_node = ndr_desc.numa_node;
+ dev_dbg(&cxlr->dev, "changing target node from %d to %d",
+ NUMA_NO_NODE, ndr_desc.target_node);
+ }
+
+ nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ndr_desc.memregion = cxlr->id;
+ set_bit(ND_REGION_CXL, &ndr_desc.flags);
+ set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
+
+ info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_memdev *cxlmd = m->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *d;
+
+ d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
+ if (!d) {
+ dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* safe to drop ref now with bridge lock held */
+ put_device(d);
+
+ cxl_nvd = to_cxl_nvdimm(d);
+ nvdimm = dev_get_drvdata(&cxl_nvd->dev);
+ if (!nvdimm) {
+ dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+ cxl_nvd->region = cxlr_pmem;
+ get_device(&cxlr_pmem->dev);
+ m->cxl_nvd = cxl_nvd;
+ mappings[i] = (struct nd_mapping_desc) {
+ .nvdimm = nvdimm,
+ .start = m->start,
+ .size = m->size,
+ .position = i,
+ };
+ info[i].offset = m->start;
+ info[i].serial = cxlds->serial;
+ }
+ ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
+ ndr_desc.mapping = mappings;
+
+ /*
+ * TODO enable CXL labels which skip the need for 'interleave-set cookie'
+ */
+ nd_set->cookie1 =
+ nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
+ nd_set->cookie2 = nd_set->cookie1;
+ ndr_desc.nd_set = nd_set;
+
+ cxlr_pmem->nd_region =
+ nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
+ if (!cxlr_pmem->nd_region) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
+ cxlr_pmem->nd_region);
+out:
+ kfree(info);
+ device_unlock(&cxl_nvb->dev);
+ put_device(&cxl_nvb->dev);
+
+ return rc;
+
+err:
+ dev_dbg(dev, "failed to create nvdimm region\n");
+ for (i--; i >= 0; i--) {
+ nvdimm = mappings[i].nvdimm;
+ cxl_nvd = nvdimm_provider_data(nvdimm);
+ put_device(&cxl_nvd->region->dev);
+ cxl_nvd->region = NULL;
+ }
+ goto out;
+}
+
+static struct cxl_driver cxl_pmem_region_driver = {
+ .name = "cxl_pmem_region",
+ .probe = cxl_pmem_region_probe,
+ .id = CXL_DEVICE_PMEM_REGION,
+};
+
/*
* Return all bridges to the CXL_NVB_NEW state to invalidate any
* ->state_work referring to the now destroyed cxl_pmem_wq.
@@ -359,8 +600,14 @@ static __init int cxl_pmem_init(void)
if (rc)
goto err_nvdimm;
+ rc = cxl_driver_register(&cxl_pmem_region_driver);
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
@@ -370,6 +617,7 @@ err_bridge:
static __exit void cxl_pmem_exit(void)
{
+ cxl_driver_unregister(&cxl_pmem_region_driver);
cxl_driver_unregister(&cxl_nvdimm_driver);
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
destroy_cxl_pmem_wq();
@@ -381,3 +629,4 @@ module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
+MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 3cf308f114c4..5453771bf330 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -53,6 +53,9 @@ static int cxl_port_probe(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ /* Cache the data early to ensure is_visible() works */
+ read_cdat_data(port);
+
get_device(&cxlmd->dev);
rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
if (rc)
@@ -78,10 +81,60 @@ static int cxl_port_probe(struct device *dev)
return 0;
}
+static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if (!port->cdat_available)
+ return -ENXIO;
+
+ if (!port->cdat.table)
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &offset,
+ port->cdat.table,
+ port->cdat.length);
+}
+
+static BIN_ATTR_ADMIN_RO(CDAT, 0);
+
+static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if ((attr == &bin_attr_CDAT) && port->cdat_available)
+ return attr->attr.mode;
+
+ return 0;
+}
+
+static struct bin_attribute *cxl_cdat_bin_attributes[] = {
+ &bin_attr_CDAT,
+ NULL,
+};
+
+static struct attribute_group cxl_cdat_attribute_group = {
+ .bin_attrs = cxl_cdat_bin_attributes,
+ .is_bin_visible = cxl_port_bin_attr_is_visible,
+};
+
+static const struct attribute_group *cxl_port_attribute_groups[] = {
+ &cxl_cdat_attribute_group,
+ NULL,
+};
+
static struct cxl_driver cxl_port_driver = {
.name = "cxl_port",
.probe = cxl_port_probe,
.id = CXL_DEVICE_PORT,
+ .drv = {
+ .dev_groups = cxl_port_attribute_groups,
+ },
};
module_cxl_driver(cxl_port_driver);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 50a08b2ec247..9b5e2a5eb0ae 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -22,6 +22,8 @@
* @private: dax driver private data
* @flags: state and boolean properties
* @ops: operations for this device
+ * @holder_data: holder of a dax_device; could be a filesystem or a mapped device
+ * @holder_ops: operations for the inner holder
*/
struct dax_device {
struct inode inode;
@@ -29,6 +31,8 @@ struct dax_device {
void *private;
unsigned long flags;
const struct dax_operations *ops;
+ void *holder_data;
+ const struct dax_holder_operations *holder_ops;
};
static dev_t dax_devt;
@@ -71,8 +75,11 @@ EXPORT_SYMBOL_GPL(dax_remove_host);
* fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
* @bdev: block device to find a dax_device for
 * @start_off: returns the byte offset into the dax_device at which @bdev starts
+ * @holder: filesystem or mapped device inside the dax_device
+ * @ops: operations for the inner holder
*/
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops)
{
struct dax_device *dax_dev;
u64 part_size;
@@ -92,11 +99,26 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
dax_dev = NULL;
+ else if (holder) {
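+		/*
+		 * A dax_device has at most one holder; the cmpxchg()
+		 * claims holder_data atomically, and the lookup fails
+		 * if another holder is already registered.
+		 */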
+ if (!cmpxchg(&dax_dev->holder_data, NULL, holder))
+ dax_dev->holder_ops = ops;
+ else
+ dax_dev = NULL;
+ }
dax_read_unlock(id);
return dax_dev;
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+
+void fs_put_dax(struct dax_device *dax_dev, void *holder)
+{
+ if (dax_dev && holder &&
+ cmpxchg(&dax_dev->holder_data, holder, NULL) == holder)
+ dax_dev->holder_ops = NULL;
+ put_dax(dax_dev);
+}
+EXPORT_SYMBOL_GPL(fs_put_dax);
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
enum dax_device_flags {
@@ -204,6 +226,29 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
}
EXPORT_SYMBOL_GPL(dax_recovery_write);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
+ u64 len, int mf_flags)
+{
+ int rc, id;
+
+ id = dax_read_lock();
+ if (!dax_alive(dax_dev)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!dax_dev->holder_ops) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
+out:
+ dax_read_unlock(id);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -277,8 +322,15 @@ void kill_dax(struct dax_device *dax_dev)
if (!dax_dev)
return;
+ if (dax_dev->holder_data != NULL)
+ dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0);
+
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
+
+ /* clear holder data */
+ dax_dev->holder_ops = NULL;
+ dax_dev->holder_data = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
@@ -421,6 +473,19 @@ void put_dax(struct dax_device *dax_dev)
EXPORT_SYMBOL_GPL(put_dax);
/**
+ * dax_holder() - obtain the holder of a dax device
+ * @dax_dev: a dax_device instance
+ *
+ * Return: the holder's data which represents the holder if registered,
+ * otherwise NULL.
+ */
+void *dax_holder(struct dax_device *dax_dev)
+{
+ return dax_dev->holder_data;
+}
+EXPORT_SYMBOL_GPL(dax_holder);
+
+/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index f7dcc44f9414..027e8f336acc 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -33,7 +33,7 @@ struct exynos_bus {
unsigned long curr_freq;
- struct opp_table *opp_table;
+ int opp_token;
struct clk *clk;
unsigned int ratio;
};
@@ -161,8 +161,7 @@ static void exynos_bus_exit(struct device *dev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
}
static void exynos_bus_passive_exit(struct device *dev)
@@ -179,18 +178,16 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
- struct opp_table *opp_table;
- const char *vdd = "vdd";
+ const char *supplies[] = { "vdd", NULL };
int i, ret, count, size;
- opp_table = dev_pm_opp_set_regulators(dev, &vdd, 1);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
+ ret = dev_pm_opp_set_regulators(dev, supplies);
+ if (ret < 0) {
dev_err(dev, "failed to set regulators %d\n", ret);
return ret;
}
- bus->opp_table = opp_table;
+ bus->opp_token = ret;
/*
* Get the devfreq-event devices to get the current utilization of
@@ -236,8 +233,7 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
return 0;
err_regulator:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
@@ -459,8 +455,7 @@ err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 585a95fe2bd6..503376b894b6 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -821,6 +821,15 @@ static int devm_tegra_devfreq_init_hw(struct device *dev,
return err;
}
+static int tegra_devfreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
+{
+ /* We want to skip clk configuration via dev_pm_opp_set_opp() */
+ return 0;
+}
+
static int tegra_devfreq_probe(struct platform_device *pdev)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
@@ -830,6 +839,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
unsigned int i;
long rate;
int err;
+ const char *clk_names[] = { "actmon", NULL };
+ struct dev_pm_opp_config config = {
+ .supported_hw = &hw_version,
+ .supported_hw_count = 1,
+ .clk_names = clk_names,
+ .config_clks = tegra_devfreq_config_clks_nop,
+ };
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
@@ -874,13 +890,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = devm_pm_opp_set_supported_hw(&pdev->dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(&pdev->dev, &config);
if (err) {
- dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
+ dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
return err;
}
- err = devm_pm_opp_of_add_table_noclk(&pdev->dev, 0);
+ err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
return err;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 487ed4ddc3be..a06d2a7627aa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -85,6 +85,14 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config APPLE_ADMAC
+ tristate "Apple ADMAC support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DMA_ENGINE
+ default ARCH_APPLE
+ help
+ Enable support for the Audio DMA Controller found on Apple Silicon SoCs.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2f1b87ffd7ab..10f7d4241001 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AMD_PTDMA) += ptdma/
+obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 6f56dfd375e3..4153c2edb049 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -749,7 +749,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
}
/**
- * msgdma_chan_remove - Channel remove function
+ * msgdma_dev_remove() - Device remove function
* @mdev: Pointer to the Altera mSGDMA device structure
*/
static void msgdma_dev_remove(struct msgdma_device *mdev)
@@ -918,7 +918,7 @@ fail:
}
/**
- * msgdma_dma_remove - Driver remove function
+ * msgdma_remove() - Driver remove function
* @pdev: Pointer to the platform_device structure
*
* Return: Always '0'
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index a4a794e62ac2..487a01aa207d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -231,7 +231,7 @@ enum pl08x_dma_chan_state {
/**
* struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phychan: the physical channel utilized by this channel, if there is one
* @name: name of channel
* @cd: channel platform data
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
new file mode 100644
index 000000000000..d1f74a3aa999
--- /dev/null
+++ b/drivers/dma/apple-admac.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+
+#define NCHANNELS_MAX 64
+#define IRQ_NOUTPUTS 4
+
+#define RING_WRITE_SLOT GENMASK(1, 0)
+#define RING_READ_SLOT GENMASK(5, 4)
+#define RING_FULL BIT(9)
+#define RING_EMPTY BIT(8)
+#define RING_ERR BIT(10)
+
+#define STATUS_DESC_DONE BIT(0)
+#define STATUS_ERR BIT(6)
+
+#define FLAG_DESC_NOTIFY BIT(16)
+
+#define REG_TX_START 0x0000
+#define REG_TX_STOP 0x0004
+#define REG_RX_START 0x0008
+#define REG_RX_STOP 0x000c
+
+#define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200)
+#define REG_CHAN_CTL_RST_RINGS BIT(0)
+
+#define REG_DESC_RING(ch) (0x8070 + (ch) * 0x200)
+#define REG_REPORT_RING(ch) (0x8074 + (ch) * 0x200)
+
+#define REG_RESIDUE(ch) (0x8064 + (ch) * 0x200)
+
+#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+
+#define BUS_WIDTH_8BIT 0x00
+#define BUS_WIDTH_16BIT 0x01
+#define BUS_WIDTH_32BIT 0x02
+#define BUS_WIDTH_FRAME_2_WORDS 0x10
+#define BUS_WIDTH_FRAME_4_WORDS 0x20
+
+#define CHAN_BUFSIZE 0x8000
+
+#define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200)
+#define CHAN_FIFOCTL_LIMIT GENMASK(31, 16)
+#define CHAN_FIFOCTL_THRESHOLD GENMASK(15, 0)
+
+#define REG_DESC_WRITE(ch) (0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
+#define REG_REPORT_READ(ch) (0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
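+/*
+ * Channel pairs appear to share a 4-byte register stride, with the odd
+ * channel of each pair banked 0x4000 above the even one.
+ */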
+
+#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
+#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
+#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
+
+struct admac_data;
+struct admac_tx;
+
+struct admac_chan {
+ unsigned int no;
+ struct admac_data *host;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+ struct admac_tx *current_tx;
+ int nperiod_acks;
+
+ /*
+ * We maintain a 'submitted' and 'issued' list mainly for interface
+ * correctness. Typical use of the driver (per channel) will be
+ * prepping, submitting and issuing a single cyclic transaction which
+ * will stay current until terminate_all is called.
+ */
+ struct list_head submitted;
+ struct list_head issued;
+
+ struct list_head to_free;
+};
+
+struct admac_data {
+ struct dma_device dma;
+ struct device *dev;
+ void __iomem *base;
+
+ int irq_index;
+ int nchannels;
+ struct admac_chan channels[];
+};
+
+struct admac_tx {
+ struct dma_async_tx_descriptor tx;
+ bool cyclic;
+ dma_addr_t buf_addr;
+ dma_addr_t buf_end;
+ size_t buf_len;
+ size_t period_len;
+
+ size_t submitted_pos;
+ size_t reclaimed_pos;
+
+ struct list_head node;
+};
+
+static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
+{
+ void __iomem *addr = ad->base + reg;
+ u32 curr = readl_relaxed(addr);
+
+ writel_relaxed((curr & ~mask) | (val & mask), addr);
+}
+
+static struct admac_chan *to_admac_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct admac_chan, chan);
+}
+
+static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct admac_tx, tx);
+}
+
+static enum dma_transfer_direction admac_chan_direction(int channo)
+{
+ /* Channel directions are hardwired */
+ return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+}
+
+static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct admac_tx *adtx = to_admac_tx(tx);
+ struct admac_chan *adchan = to_admac_chan(tx->chan);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&adtx->node, &adchan->submitted);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return cookie;
+}
+
+static int admac_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ kfree(to_admac_tx(tx));
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
+ struct admac_tx *adtx;
+
+ if (direction != admac_chan_direction(adchan->no))
+ return NULL;
+
+ adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
+ if (!adtx)
+ return NULL;
+
+ adtx->cyclic = true;
+
+ adtx->buf_addr = buf_addr;
+ adtx->buf_len = buf_len;
+ adtx->buf_end = buf_addr + buf_len;
+ adtx->period_len = period_len;
+
+ adtx->submitted_pos = 0;
+ adtx->reclaimed_pos = 0;
+
+ dma_async_tx_descriptor_init(&adtx->tx, chan);
+ adtx->tx.tx_submit = admac_tx_submit;
+ adtx->tx.desc_free = admac_desc_free;
+
+ return &adtx->tx;
+}
+
+/*
+ * Write one hardware descriptor for a dmaengine cyclic transaction.
+ */
+static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ dma_addr_t addr;
+
+ addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);
+
+ /* If this happens, we have buggy code */
+ WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);
+
+ dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
+ channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);
+
+ writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(tx->period_len, ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(FLAG_DESC_NOTIFY, ad->base + REG_DESC_WRITE(channo));
+
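+	/*
+	 * Positions are kept modulo twice the buffer length, which appears
+	 * to keep submitted_pos and reclaimed_pos comparable across
+	 * wrap-around of the cyclic buffer.
+	 */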
+ tx->submitted_pos += tx->period_len;
+ tx->submitted_pos %= 2 * tx->buf_len;
+}
+
+/*
+ * Write as many hardware descriptors for a dmaengine cyclic
+ * transaction as there is ring space for.
+ */
+static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
+ break;
+ admac_cyclic_write_one_desc(ad, channo, tx);
+ }
+}
+
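+/*
+ * Occupancy of the 4-slot ring, e.g. wrslot=1, rdslot=3 gives
+ * (1 + 4 - 3) % 4 = 2; equal slots are disambiguated by the
+ * RING_FULL/RING_EMPTY flags.
+ */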
+static int admac_ring_noccupied_slots(int ringval)
+{
+ int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
+ int rdslot = FIELD_GET(RING_READ_SLOT, ringval);
+
+ if (wrslot != rdslot) {
+ return (wrslot + 4 - rdslot) % 4;
+ } else {
+ WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);
+
+ if (ringval & RING_FULL)
+ return 4;
+ else
+ return 0;
+ }
+}
+
+/*
+ * Read from hardware the residue of a cyclic dmaengine transaction.
+ */
+static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
+ struct admac_tx *adtx)
+{
+ u32 ring1, ring2;
+ u32 residue1, residue2;
+ int nreports;
+ size_t pos;
+
+ ring1 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+ ring2 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+
+ if (residue2 > residue1) {
+ /*
+ * Controller must have loaded next descriptor between
+ * the two residue reads
+ */
+ nreports = admac_ring_noccupied_slots(ring1) + 1;
+ } else {
+ /* No descriptor load between the two reads, ring2 is safe to use */
+ nreports = admac_ring_noccupied_slots(ring2);
+ }
+
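+	/*
+	 * The pending (undrained) reports plus the in-flight descriptor
+	 * account for (nreports + 1) periods past reclaimed_pos, of which
+	 * residue2 bytes remain untransferred.
+	 */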
+ pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;
+
+ return adtx->buf_len - pos % adtx->buf_len;
+}
+
+static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ struct admac_tx *adtx;
+
+ enum dma_status ret;
+ size_t residue;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ adtx = adchan->current_tx;
+
+ if (adtx && adtx->tx.cookie == cookie) {
+ ret = DMA_IN_PROGRESS;
+ residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
+ } else {
+ ret = DMA_IN_PROGRESS;
+ residue = 0;
+ list_for_each_entry(adtx, &adchan->issued, node) {
+ if (adtx->tx.cookie == cookie) {
+ residue = adtx->buf_len;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ dma_set_residue(txstate, residue);
+ return ret;
+}
+
+static void admac_start_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 startbit = 1 << (adchan->no / 2);
+
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(startbit, ad->base + REG_TX_START);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(startbit, ad->base + REG_RX_START);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
+}
+
+static void admac_stop_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 stopbit = 1 << (adchan->no / 2);
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(stopbit, ad->base + REG_TX_STOP);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(stopbit, ad->base + REG_RX_STOP);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
+}
+
+static void admac_reset_rings(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+
+ writel_relaxed(REG_CHAN_CTL_RST_RINGS,
+ ad->base + REG_CHAN_CTL(adchan->no));
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
+}
+
+static void admac_start_current_tx(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ int ch = adchan->no;
+
+ admac_reset_rings(adchan);
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));
+
+ admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
+ admac_start_chan(adchan);
+ admac_cyclic_write_desc(ad, ch, adchan->current_tx);
+}
+
+static void admac_issue_pending(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->submitted, &adchan->issued);
+ if (!list_empty(&adchan->issued) && !adchan->current_tx) {
+ tx = list_first_entry(&adchan->issued, struct admac_tx, node);
+ list_del(&tx->node);
+
+ adchan->current_tx = tx;
+ adchan->nperiod_acks = 0;
+ admac_start_current_tx(adchan);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static int admac_pause(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_stop_chan(adchan);
+
+ return 0;
+}
+
+static int admac_resume(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_start_chan(adchan);
+
+ return 0;
+}
+
+static int admac_terminate_all(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ admac_stop_chan(adchan);
+ admac_reset_rings(adchan);
+
+ adchan->current_tx = NULL;
+ /*
+ * Descriptors can only be freed after the tasklet
+ * has been killed (in admac_synchronize).
+ */
+ list_splice_tail_init(&adchan->submitted, &adchan->to_free);
+ list_splice_tail_init(&adchan->issued, &adchan->to_free);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return 0;
+}
+
+static void admac_synchronize(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *adtx, *_adtx;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->to_free, &head);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ tasklet_kill(&adchan->tasklet);
+
+ list_for_each_entry_safe(adtx, _adtx, &head, node) {
+ list_del(&adtx->node);
+ admac_desc_free(&adtx->tx);
+ }
+}
+
+static int admac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ dma_cookie_init(&adchan->chan);
+ return 0;
+}
+
+static void admac_free_chan_resources(struct dma_chan *chan)
+{
+ admac_terminate_all(chan);
+ admac_synchronize(chan);
+}
+
+static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
+ unsigned int index;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ index = dma_spec->args[0];
+
+ if (index >= ad->nchannels) {
+ dev_err(ad->dev, "channel index %u out of bounds\n", index);
+ return NULL;
+ }
+
+ return &ad->channels[index].chan;
+}
+
+static int admac_drain_reports(struct admac_data *ad, int channo)
+{
+ int count;
+
+ for (count = 0; count < 4; count++) {
+ u32 countval_hi, countval_lo, unk1, flags;
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
+ break;
+
+ countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ unk1 = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ flags = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+
+ dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
+ channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
+ }
+
+ return count;
+}
+
+static void admac_handle_status_err(struct admac_data *ad, int channo)
+{
+ bool handled = false;
+
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
+ handled = true;
+ }
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
+ handled = true;
+ }
+
+ if (unlikely(!handled)) {
+ dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
+ admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
+ STATUS_ERR, 0);
+ }
+}
+
+static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
+{
+ struct admac_chan *adchan = &ad->channels[channo];
+ unsigned long flags;
+ int nreports;
+
+ writel_relaxed(STATUS_DESC_DONE,
+ ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ nreports = admac_drain_reports(ad, channo);
+
+ if (adchan->current_tx) {
+ struct admac_tx *tx = adchan->current_tx;
+
+ adchan->nperiod_acks += nreports;
+ tx->reclaimed_pos += nreports * tx->period_len;
+ tx->reclaimed_pos %= 2 * tx->buf_len;
+
+ admac_cyclic_write_desc(ad, channo, tx);
+ tasklet_schedule(&adchan->tasklet);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static void admac_handle_chan_int(struct admac_data *ad, int no)
+{
+ u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));
+
+ if (cause & STATUS_ERR)
+ admac_handle_status_err(ad, no);
+
+ if (cause & STATUS_DESC_DONE)
+ admac_handle_status_desc_done(ad, no);
+}
+
+static irqreturn_t admac_interrupt(int irq, void *devid)
+{
+ struct admac_data *ad = devid;
+ u32 rx_intstate, tx_intstate;
+ int i;
+
+ rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
+ tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+
+ if (!tx_intstate && !rx_intstate)
+ return IRQ_NONE;
+
+ for (i = 0; i < ad->nchannels; i += 2) {
+ if (tx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ tx_intstate >>= 1;
+ }
+
+ for (i = 1; i < ad->nchannels; i += 2) {
+ if (rx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ rx_intstate >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void admac_chan_tasklet(struct tasklet_struct *t)
+{
+ struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
+ struct admac_tx *adtx;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result tx_result;
+ int nacks;
+
+ spin_lock_irq(&adchan->lock);
+ adtx = adchan->current_tx;
+ nacks = adchan->nperiod_acks;
+ adchan->nperiod_acks = 0;
+ spin_unlock_irq(&adchan->lock);
+
+ if (!adtx || !nacks)
+ return;
+
+ tx_result.result = DMA_TRANS_NOERROR;
+ tx_result.residue = 0;
+
+ dmaengine_desc_get_callback(&adtx->tx, &cb);
+ while (nacks--)
+ dmaengine_desc_callback_invoke(&cb, &tx_result);
+}
+
+static int admac_device_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
+ int wordsize = 0;
+ u32 bus_width = 0;
+
+ switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ wordsize = 1;
+ bus_width |= BUS_WIDTH_8BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ wordsize = 2;
+ bus_width |= BUS_WIDTH_16BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ wordsize = 4;
+ bus_width |= BUS_WIDTH_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * We take port_window_size to be the number of words in a frame.
+ *
+ * The controller has some means of out-of-band signalling, to the peripheral,
+ * of a word's position within a frame. That is where the importance of this
+ * control comes from.
+ */
+ switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
+ case 0 ... 1:
+ break;
+ case 2:
+ bus_width |= BUS_WIDTH_FRAME_2_WORDS;
+ break;
+ case 4:
+ bus_width |= BUS_WIDTH_FRAME_4_WORDS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));
+
+ /*
+ * With FIFOCTL_LIMIT we seem to set the maximum number of bytes allowed to be
+ * held in the controller's per-channel FIFO. Transfers seem to be triggered
+ * around the time FIFO occupancy touches FIFOCTL_THRESHOLD.
+ *
+ * The numbers we set are more or less arbitrary.
+ */
+ writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
+ | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
+ ad->base + REG_CHAN_FIFOCTL(adchan->no));
+
+ return 0;
+}
+
+static int admac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct admac_data *ad;
+ struct dma_device *dma;
+ int nchannels;
+ int err, irq, i;
+
+ err = of_property_read_u32(np, "dma-channels", &nchannels);
+ if (err || nchannels > NCHANNELS_MAX) {
+ dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
+ return -EINVAL;
+ }
+
+ ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
+ if (!ad)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ad);
+ ad->dev = &pdev->dev;
+ ad->nchannels = nchannels;
+
+ /*
+ * The controller has 4 IRQ outputs. Try them all until
+ * we find one we can use.
+ */
+ for (i = 0; i < IRQ_NOUTPUTS; i++) {
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq >= 0) {
+ ad->irq_index = i;
+ break;
+ }
+ }
+
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
+
+ err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
+ 0, dev_name(&pdev->dev), ad);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "unable to register interrupt\n");
+
+ ad->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ad->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
+ "unable to obtain MMIO resource\n");
+
+ dma = &ad->dma;
+
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+
+ dma->dev = &pdev->dev;
+ dma->device_alloc_chan_resources = admac_alloc_chan_resources;
+ dma->device_free_chan_resources = admac_free_chan_resources;
+ dma->device_tx_status = admac_tx_status;
+ dma->device_issue_pending = admac_issue_pending;
+ dma->device_terminate_all = admac_terminate_all;
+ dma->device_synchronize = admac_synchronize;
+ dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
+ dma->device_config = admac_device_config;
+ dma->device_pause = admac_pause;
+ dma->device_resume = admac_resume;
+
+ dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+
+ INIT_LIST_HEAD(&dma->channels);
+ for (i = 0; i < nchannels; i++) {
+ struct admac_chan *adchan = &ad->channels[i];
+
+ adchan->host = ad;
+ adchan->no = i;
+ adchan->chan.device = &ad->dma;
+ spin_lock_init(&adchan->lock);
+ INIT_LIST_HEAD(&adchan->submitted);
+ INIT_LIST_HEAD(&adchan->issued);
+ INIT_LIST_HEAD(&adchan->to_free);
+ list_add_tail(&adchan->chan.device_node, &dma->channels);
+ tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
+ }
+
+ err = dma_async_device_register(&ad->dma);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+
+ err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
+ if (err) {
+ dma_async_device_unregister(&ad->dma);
+ return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ }
+
+ return 0;
+}
+
+static int admac_remove(struct platform_device *pdev)
+{
+ struct admac_data *ad = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&ad->dma);
+
+ return 0;
+}
+
+static const struct of_device_id admac_of_match[] = {
+ { .compatible = "apple,admac", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, admac_of_match);
+
+static struct platform_driver apple_admac_driver = {
+ .driver = {
+ .name = "apple-admac",
+ .of_match_table = admac_of_match,
+ },
+ .probe = admac_probe,
+ .remove = admac_remove,
+};
+module_platform_driver(apple_admac_driver);
+
+MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
+MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
+MODULE_LICENSE("GPL");
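For orientation — and not part of the patch itself — here is a minimal client-side sketch of how a peripheral driver might consume one of the cyclic channels admac registers above. The function name, the "tx" channel name and the buffer parameters are illustrative assumptions; the dmaengine calls are the standard client API the driver wires up.

	/*
	 * Hypothetical consumer sketch (assumption: a one-cell "dmas" entry in
	 * the client's device tree node, resolved by admac_dma_of_xlate()).
	 * Error handling is abbreviated.
	 */
	#include <linux/dmaengine.h>

	static int example_start_playback(struct device *dev, dma_addr_t buf,
					  size_t buf_len, size_t period_len)
	{
		struct dma_slave_config cfg = {
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_port_window_size = 2,	/* two words per frame */
		};
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		int ret;

		chan = dma_request_chan(dev, "tx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = dmaengine_slave_config(chan, &cfg);	/* admac_device_config() */
		if (ret)
			goto err_release;

		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
						 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!desc) {
			ret = -ENOMEM;
			goto err_release;
		}

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);	/* lands in admac_issue_pending() */
		return 0;

	err_release:
		dma_release_channel(chan);
		return ret;
	}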
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7b3e6030f7b4..b102d8eb5d83 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -649,7 +649,7 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
}
/*
- * Only check that maxburst and addr width values are supported by the
+ * Only check that maxburst and addr width values are supported by
* the controller but not that the configuration is good to perform the
* transfer since we don't know the direction at this stage.
*/
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 5161b73c30c4..f30dabc99795 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -55,6 +56,9 @@
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
+#define AXI_DMAC_REG_COHERENCY_DESC 0x14
+#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
+#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)
#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
@@ -979,6 +983,18 @@ static int axi_dmac_probe(struct platform_device *pdev)
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
+
+ if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
+ !AXI_DMAC_DST_COHERENT_GET(ret)) {
+ dev_err(dmac->dma_dev.dev,
+ "Coherent DMA not supported in hardware");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+ }
+
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_clk_disable;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index e2ec540e6519..2a483802d9ee 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -388,7 +388,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
if (i != (sg_len - 1) &&
!(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
- /* Automatically proceeed to the next descriptor. */
+ /* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
/*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e80feeea0e01..c741b6431958 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1153,13 +1153,6 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
- if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMCPY_SG");
- return -EIO;
- }
-
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f696246f57fd..9fe2ae794316 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -22,51 +22,50 @@
#include <linux/wait.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
+module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_device[32];
-module_param_string(device, test_device, sizeof(test_device),
- S_IRUGO | S_IWUSR);
+module_param_string(device, test_device, sizeof(test_device), 0644);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
+module_param(threads_per_chan, uint, 0644);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO | S_IWUSR);
+module_param(max_channels, uint, 0644);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO | S_IWUSR);
+module_param(iterations, uint, 0644);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int dmatest;
-module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+module_param(dmatest, uint, 0644);
MODULE_PARM_DESC(dmatest,
"dmatest 0-memcpy 1-memset (default: 0)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
+module_param(xor_sources, uint, 0644);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
+module_param(pq_sources, uint, 0644);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, int, S_IRUGO | S_IWUSR);
+module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
static bool noverify;
-module_param(noverify, bool, S_IRUGO | S_IWUSR);
+module_param(noverify, bool, 0644);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
static bool norandom;
@@ -74,7 +73,7 @@ module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
static bool verbose;
-module_param(verbose, bool, S_IRUGO | S_IWUSR);
+module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
static int alignment = -1;
@@ -86,7 +85,7 @@ module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
+module_param(polled, bool, 0644);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
@@ -154,7 +153,7 @@ static const struct kernel_param_ops run_ops = {
.get = dmatest_run_get,
};
static bool dmatest_run;
-module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+module_param_cb(run, &run_ops, &dmatest_run, 0644);
MODULE_PARM_DESC(run, "Run the test (default: false)");
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
@@ -290,7 +289,7 @@ static const struct kernel_param_ops wait_ops = {
.get = dmatest_wait_get,
.set = param_set_bool,
};
-module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
+module_param_cb(wait, &wait_ops, &wait, 0444);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
static bool dmatest_match_channel(struct dmatest_params *params,
@@ -579,10 +578,10 @@ static int dmatest_func(void *data)
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
- enum dma_ctrl_flags flags;
+ enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
- unsigned int buf_size;
+ unsigned int buf_size;
struct dmatest_data *src;
struct dmatest_data *dst;
int i;
@@ -1095,8 +1094,8 @@ static void add_threaded_test(struct dmatest_info *info)
/* Copy test parameters */
params->buf_size = test_buf_size;
- strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
- strlcpy(params->device, strim(test_device), sizeof(params->device));
+ strscpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strscpy(params->device, strim(test_device), sizeof(params->device));
params->threads_per_chan = threads_per_chan;
params->max_channels = max_channels;
params->iterations = iterations;
@@ -1240,7 +1239,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
dtc = list_last_entry(&info->channels,
struct dmatest_chan,
node);
- strlcpy(chan_reset_val,
+ strscpy(chan_reset_val,
dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
ret = -EBUSY;
@@ -1263,14 +1262,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
&& (strcmp("", strim(test_channel)) != 0)) {
ret = -EINVAL;
- strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
+ strscpy(chan_reset_val, dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
goto add_chan_err;
}
} else {
/* Clear test_channel if no channels were added successfully */
- strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
+ strscpy(chan_reset_val, "", sizeof(chan_reset_val));
ret = -EBUSY;
goto add_chan_err;
}
@@ -1295,7 +1294,7 @@ static int dmatest_chan_get(char *val, const struct kernel_param *kp)
mutex_lock(&info->lock);
if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
stop_threaded_test(info);
- strlcpy(test_channel, "", sizeof(test_channel));
+ strscpy(test_channel, "", sizeof(test_channel));
}
mutex_unlock(&info->lock);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c741da02b67e..a183d93bd7e2 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -982,6 +982,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_hw_desc *desc)
{
+ if (!desc->lli) {
+ dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
+ return;
+ }
+
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
le64_to_cpu(desc->lli->sar),
@@ -1049,6 +1054,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
if (chan->cyclic) {
desc = vd_to_axi_desc(vd);
@@ -1078,6 +1088,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
axi_chan_start_first_queued(chan);
}
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 468d1097a1ec..07f756479663 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -64,8 +64,8 @@ static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
+ struct dw_edma_chip *chip = desc->chan->dw->chip;
struct dw_edma_chan *chan = desc->chan;
- struct dw_edma *dw = chan->chip->dw;
struct dw_edma_chunk *chunk;
chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
@@ -82,11 +82,11 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
*/
chunk->cb = !(desc->chunks_alloc % 2);
if (chan->dir == EDMA_DIR_WRITE) {
- chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
- chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+ chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
+ chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
} else {
- chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
- chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+ chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
+ chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
}
if (desc->chunk) {
@@ -339,21 +339,40 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (!chan->configured)
return NULL;
- switch (chan->config.direction) {
- case DMA_DEV_TO_MEM: /* local DMA */
- if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
- break;
- return NULL;
- case DMA_MEM_TO_DEV: /* local DMA */
- if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
- break;
- return NULL;
- default: /* remote DMA */
- if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
- break;
- if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
- break;
- return NULL;
+ /*
+ * Local Root Port/End-point Remote End-point
+ * +-----------------------+ PCIe bus +----------------------+
+ * | | +-+ | |
+ * | DEV_TO_MEM Rx Ch <----+ +---+ Tx Ch DEV_TO_MEM |
+ * | | | | | |
+ * | MEM_TO_DEV Tx Ch +----+ +---> Rx Ch MEM_TO_DEV |
+ * | | +-+ | |
+ * +-----------------------+ +----------------------+
+ *
+ * 1. Normal logic:
+ * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
+ * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
+ * for the device read operations (DEV_TO_MEM) and the Tx channel
+ * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
+ *
+ * 2. Inverted logic:
+ * If eDMA is embedded into a Remote PCIe EP and is controlled by the
+ * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
+ * channel (EDMA_DIR_WRITE) will be used for the device read operations
+ * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
+ * operations (MEM_TO_DEV).
+ *
+ * It is the client driver's responsibility to choose a proper channel
+ * for the DMA transfers.
+ */
+ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
+ (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
+ return NULL;
+ } else {
+ if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
+ (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
+ return NULL;
}
if (xfer->type == EDMA_XFER_CYCLIC) {
@@ -423,7 +442,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (chan->dir == EDMA_DIR_WRITE) {
+ if (dir == DMA_DEV_TO_MEM) {
burst->sar = src_addr;
if (xfer->type == EDMA_XFER_CYCLIC) {
burst->dar = xfer->xfer.cyclic.paddr;
@@ -663,7 +682,7 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->status != EDMA_ST_IDLE)
return -EBUSY;
- pm_runtime_get(chan->chip->dev);
+ pm_runtime_get(chan->dw->chip->dev);
return 0;
}
@@ -685,15 +704,15 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
cpu_relax();
}
- pm_runtime_put(chan->chip->dev);
+ pm_runtime_put(chan->dw->chip->dev);
}
-static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
u32 wr_alloc, u32 rd_alloc)
{
+ struct dw_edma_chip *chip = dw->chip;
struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
@@ -726,7 +745,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->vc.chan.private = dt_region;
- chan->chip = chip;
+ chan->dw = dw;
chan->id = j;
chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
chan->configured = false;
@@ -734,9 +753,9 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->status = EDMA_ST_IDLE;
if (write)
- chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
else
- chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
@@ -766,13 +785,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
vchan_init(&chan->vc, dma);
if (write) {
- dt_region->paddr = dw->dt_region_wr[j].paddr;
- dt_region->vaddr = dw->dt_region_wr[j].vaddr;
- dt_region->sz = dw->dt_region_wr[j].sz;
+ dt_region->paddr = chip->dt_region_wr[j].paddr;
+ dt_region->vaddr = chip->dt_region_wr[j].vaddr;
+ dt_region->sz = chip->dt_region_wr[j].sz;
} else {
- dt_region->paddr = dw->dt_region_rd[j].paddr;
- dt_region->vaddr = dw->dt_region_rd[j].vaddr;
- dt_region->sz = dw->dt_region_rd[j].sz;
+ dt_region->paddr = chip->dt_region_rd[j].paddr;
+ dt_region->vaddr = chip->dt_region_rd[j].vaddr;
+ dt_region->sz = chip->dt_region_rd[j].sz;
}
dw_edma_v0_core_device_config(chan);
@@ -826,11 +845,11 @@ static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
(*mask)++;
}
-static int dw_edma_irq_request(struct dw_edma_chip *chip,
+static int dw_edma_irq_request(struct dw_edma *dw,
u32 *wr_alloc, u32 *rd_alloc)
{
- struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
+ struct dw_edma_chip *chip = dw->chip;
+ struct device *dev = dw->chip->dev;
u32 wr_mask = 1;
u32 rd_mask = 1;
int i, err = 0;
@@ -839,12 +858,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
- if (dw->nr_irqs < 1)
+ if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
return -EINVAL;
- if (dw->nr_irqs == 1) {
+ dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
+ if (!dw->irq)
+ return -ENOMEM;
+
+ if (chip->nr_irqs == 1) {
/* Common IRQ shared among all channels */
- irq = dw->ops->irq_vector(dev, 0);
+ irq = chip->ops->irq_vector(dev, 0);
err = request_irq(irq, dw_edma_interrupt_common,
IRQF_SHARED, dw->name, &dw->irq[0]);
if (err) {
@@ -854,9 +877,11 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[0].msi);
+
+ dw->nr_irqs = 1;
} else {
/* Distribute IRQs equally among all channels */
- int tmp = dw->nr_irqs;
+ int tmp = chip->nr_irqs;
while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
@@ -867,7 +892,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
- irq = dw->ops->irq_vector(dev, i);
+ irq = chip->ops->irq_vector(dev, i);
err = request_irq(irq,
i < *wr_alloc ?
dw_edma_interrupt_write :
@@ -901,20 +926,22 @@ int dw_edma_probe(struct dw_edma_chip *chip)
return -EINVAL;
dev = chip->dev;
- if (!dev)
+ if (!dev || !chip->ops)
return -EINVAL;
- dw = chip->dw;
- if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
- return -EINVAL;
+ dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ dw->chip = chip;
raw_spin_lock_init(&dw->lock);
- dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+ dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
- dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+ dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
@@ -936,17 +963,17 @@ int dw_edma_probe(struct dw_edma_chip *chip)
dw_edma_v0_core_off(dw);
/* Request IRQs */
- err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
+ err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
if (err)
return err;
/* Setup write channels */
- err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
+ err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
/* Setup read channels */
- err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
+ err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
@@ -954,15 +981,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
pm_runtime_enable(dev);
/* Turn debugfs on */
- dw_edma_v0_core_debugfs_on(chip);
+ dw_edma_v0_core_debugfs_on(dw);
+
+ chip->dw = dw;
return 0;
err_irq_free:
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
-
- dw->nr_irqs = 0;
+ free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
return err;
}
@@ -980,7 +1007,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
+ free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Power management */
pm_runtime_disable(dev);
@@ -1001,7 +1028,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
}
/* Turn debugfs off */
- dw_edma_v0_core_debugfs_off(chip);
+ dw_edma_v0_core_debugfs_off(dw);
return 0;
}
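The net effect of the refactor above is a cleaner registration contract: a glue driver fills in only struct dw_edma_chip, and dw_edma_probe() now allocates struct dw_edma itself and publishes it via chip->dw. Below is a hedged sketch of that contract, using field names visible in this series; example_glue_probe, example_core_ops and the linear IRQ mapping are hypothetical.

	static int example_irq_vector(struct device *dev, unsigned int nr)
	{
		/* assumption: a platform device with linearly mapped IRQs */
		return platform_get_irq(to_platform_device(dev), nr);
	}

	static const struct dw_edma_core_ops example_core_ops = {
		.irq_vector = example_irq_vector,
	};

	static int example_glue_probe(struct device *dev, void __iomem *regs,
				      int nr_irqs)
	{
		struct dw_edma_chip *chip;

		chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->dev = dev;
		chip->ops = &example_core_ops;	/* probe fails without ->irq_vector */
		chip->mf = EDMA_MF_EDMA_UNROLL;
		chip->nr_irqs = nr_irqs;
		chip->reg_base = regs;
		chip->ll_wr_cnt = 1;
		chip->ll_rd_cnt = 1;
		/* chip->ll_region_wr[0], chip->dt_region_wr[0], ... set up here */

		return dw_edma_probe(chip);	/* sets chip->dw on success */
	}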
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 60316d408c3e..85df2d511907 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -15,20 +15,12 @@
#include "../virt-dma.h"
#define EDMA_LL_SZ 24
-#define EDMA_MAX_WR_CH 8
-#define EDMA_MAX_RD_CH 8
enum dw_edma_dir {
EDMA_DIR_WRITE = 0,
EDMA_DIR_READ
};
-enum dw_edma_map_format {
- EDMA_MF_EDMA_LEGACY = 0x0,
- EDMA_MF_EDMA_UNROLL = 0x1,
- EDMA_MF_HDMA_COMPAT = 0x5
-};
-
enum dw_edma_request {
EDMA_REQ_NONE = 0,
EDMA_REQ_STOP,
@@ -57,12 +49,6 @@ struct dw_edma_burst {
u32 sz;
};
-struct dw_edma_region {
- phys_addr_t paddr;
- void __iomem *vaddr;
- size_t sz;
-};
-
struct dw_edma_chunk {
struct list_head list;
struct dw_edma_chan *chan;
@@ -87,7 +73,7 @@ struct dw_edma_desc {
struct dw_edma_chan {
struct virt_dma_chan vc;
- struct dw_edma_chip *chip;
+ struct dw_edma *dw;
int id;
enum dw_edma_dir dir;
@@ -109,10 +95,6 @@ struct dw_edma_irq {
struct dw_edma *dw;
};
-struct dw_edma_core_ops {
- int (*irq_vector)(struct device *dev, unsigned int nr);
-};
-
struct dw_edma {
char name[20];
@@ -122,21 +104,14 @@ struct dw_edma {
struct dma_device rd_edma;
u16 rd_ch_cnt;
- struct dw_edma_region rg_region; /* Registers */
- struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
- struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
- struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
- struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
-
struct dw_edma_irq *irq;
int nr_irqs;
- enum dw_edma_map_format mf;
-
struct dw_edma_chan *chan;
- const struct dw_edma_core_ops *ops;
raw_spinlock_t lock; /* Only for legacy */
+
+ struct dw_edma_chip *chip;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index cee7aa231d7b..d6b5e2463884 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -148,7 +148,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_pcie_data vsec_data;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
- struct dw_edma *dw;
int err, nr_irqs;
int i, mask;
@@ -197,10 +196,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
if (!chip)
return -ENOMEM;
- dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
- if (!dw)
- return -ENOMEM;
-
/* IRQs allocation */
nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
@@ -211,29 +206,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
}
/* Data structure initialization */
- chip->dw = dw;
chip->dev = dev;
chip->id = pdev->devfn;
- chip->irq = pdev->irq;
- dw->mf = vsec_data.mf;
- dw->nr_irqs = nr_irqs;
- dw->ops = &dw_edma_pcie_core_ops;
- dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
- dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
+ chip->mf = vsec_data.mf;
+ chip->nr_irqs = nr_irqs;
+ chip->ops = &dw_edma_pcie_core_ops;
- dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
- if (!dw->rg_region.vaddr)
- return -ENOMEM;
+ chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
- dw->rg_region.vaddr += vsec_data.rg.off;
- dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
- dw->rg_region.paddr += vsec_data.rg.off;
- dw->rg_region.sz = vsec_data.rg.sz;
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ if (!chip->reg_base)
+ return -ENOMEM;
- for (i = 0; i < dw->wr_ch_cnt; i++) {
- struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
- struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+ for (i = 0; i < chip->ll_wr_cnt; i++) {
+ struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
+ struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
@@ -256,9 +245,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
dt_region->sz = dt_block->sz;
}
- for (i = 0; i < dw->rd_ch_cnt; i++) {
- struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
- struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+ for (i = 0; i < chip->ll_rd_cnt; i++) {
+ struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
+ struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
@@ -282,45 +271,45 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
}
/* Debug info */
- if (dw->mf == EDMA_MF_EDMA_LEGACY)
- pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
- else if (dw->mf == EDMA_MF_EDMA_UNROLL)
- pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
- else if (dw->mf == EDMA_MF_HDMA_COMPAT)
- pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+ if (chip->mf == EDMA_MF_EDMA_LEGACY)
+ pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_EDMA_UNROLL)
+ pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_HDMA_COMPAT)
+ pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
else
- pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
+ pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
- dw->rg_region.vaddr, &dw->rg_region.paddr);
+ chip->reg_base);
- for (i = 0; i < dw->wr_ch_cnt; i++) {
+ for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
- dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+ vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
- dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+ vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
}
- for (i = 0; i < dw->rd_ch_cnt; i++) {
+ for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
- dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+ vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
- dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+ vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
}
- pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
+ pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
/* Validating if PCI interrupts were enabled */
if (!pci_dev_msi_enabled(pdev)) {
@@ -328,10 +317,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -EPERM;
}
- dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
- if (!dw->irq)
- return -ENOMEM;
-
/* Starting eDMA driver */
err = dw_edma_probe(chip);
if (err) {
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 33bc1e6c4cf2..77e6cfe52e0a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return dw->rg_region.vaddr;
+ return dw->chip->reg_base;
}
#define SET_32(dw, name, value) \
@@ -96,7 +96,7 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY)
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
return &(__dw_regs(dw)->type.legacy.ch);
if (dir == EDMA_DIR_WRITE)
@@ -108,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u32 value, void __iomem *addr)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -133,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -169,7 +169,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -194,7 +194,7 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -256,7 +256,7 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp;
tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
@@ -272,7 +272,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
SET_RW_32(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
@@ -280,7 +280,7 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
SET_RW_32(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
@@ -301,6 +301,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
+ struct dw_edma_chan *chan = chunk->chan;
struct dw_edma_v0_lli __iomem *lli;
struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
@@ -314,9 +315,11 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
j = chunk->bursts_alloc;
list_for_each_entry(child, &chunk->burst->list, list) {
j--;
- if (!j)
- control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
-
+ if (!j) {
+ control |= DW_EDMA_V0_LIE;
+ if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ control |= DW_EDMA_V0_RIE;
+ }
/* Channel control */
SET_LL_32(&lli[i].control, control);
/* Transfer size */
@@ -357,7 +360,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp;
dw_edma_v0_core_write_chunk(chunk);
@@ -365,7 +368,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
SET_RW_32(dw, chan->dir, engine_en, BIT(0));
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
switch (chan->id) {
case 0:
SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
@@ -414,19 +417,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
-
- #ifdef CONFIG_64BIT
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
- #else /* CONFIG_64BIT */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
}
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
@@ -435,7 +430,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp = 0;
/* MSI done addr - low, high */
@@ -505,12 +500,12 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
}
/* eDMA debugfs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
- dw_edma_v0_debugfs_on(chip);
+ dw_edma_v0_debugfs_on(dw);
}
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
{
- dw_edma_v0_debugfs_off(chip);
+ dw_edma_v0_debugfs_off(dw);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index 2afa626b8300..75aec6d31b21 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -22,7 +22,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir)
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 4b3bcffd15ef..5226c9014703 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -54,7 +54,7 @@ struct debugfs_entries {
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
void __iomem *reg = (void __force __iomem *)data;
- if (dw->mf == EDMA_MF_EDMA_LEGACY &&
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
reg >= (void __iomem *)&regs->type.legacy.ch) {
void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
@@ -173,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -242,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -282,13 +282,13 @@ static void dw_edma_debugfs_regs(void)
dw_edma_debugfs_regs_rd(regs_dir);
}
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
{
- dw = chip->dw;
+ dw = _dw;
if (!dw)
return;
- regs = dw->rg_region.vaddr;
+ regs = dw->chip->reg_base;
if (!regs)
return;
@@ -296,16 +296,16 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!dw->debugfs)
return;
- debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+ debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
{
- dw = chip->dw;
+ dw = _dw;
if (!dw)
return;
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index d0ff25a9ea5c..3391b86edf5a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -12,14 +12,14 @@
#include <linux/dma/edma.h>
#ifdef CONFIG_DEBUG_FS
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
-static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
-static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
{
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index 11d254e450b0..f9912c3dd4d7 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -102,10 +102,12 @@ free_map:
return ERR_PTR(ret);
}
+#ifdef CONFIG_OF
static const struct of_device_id rzn1_dmac_match[] = {
{ .compatible = "renesas,rzn1-dma" },
{}
};
+#endif
static int rzn1_dmamux_probe(struct platform_device *pdev)
{
@@ -140,6 +142,7 @@ static const struct of_device_id rzn1_dmamux_match[] = {
{ .compatible = "renesas,rzn1-dmamux" },
{}
};
+MODULE_DEVICE_TABLE(of, rzn1_dmamux_match);
static struct platform_driver rzn1_dmamux_driver = {
.driver = {
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 971ff5f9ae84..d19ea885c63e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1183,7 +1183,7 @@ fail:
*
* Synchronizes the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory associated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 3ae05d1446a5..a06a1575a2a5 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -559,9 +559,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
}
for_each_sg(sgl, sg, sg_len, i) {
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = fsl_chan->dma_dev_addr;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 3bffe3ecbd1b..65c6094ce063 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1047,7 +1047,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
return -ENOMEM;
imxdma->dev = &pdev->dev;
- imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev);
+ imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f37a276f519e..fbea5f62dd98 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -183,12 +183,14 @@
BIT(DMA_DEV_TO_DEV))
#define SDMA_WATERMARK_LEVEL_N_FIFOS GENMASK(15, 12)
+#define SDMA_WATERMARK_LEVEL_OFF_FIFOS GENMASK(19, 16)
+#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO GENMASK(31, 28)
#define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
#define SDMA_DONE0_CONFIG_DONE_SEL BIT(7)
#define SDMA_DONE0_CONFIG_DONE_DIS BIT(6)
-/**
+/*
* struct sdma_script_start_addrs - SDMA script start pointers
*
* start addresses of the different functions in the physical
@@ -424,6 +426,14 @@ struct sdma_desc {
* @data: specific sdma interface structure
* @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
+ * @terminated: terminated list
+ * @is_ram_script: flag for script in ram
+ * @n_fifos_src: number of source device fifos
+ * @n_fifos_dst: number of destination device fifos
+ * @sw_done: software done flag
+ * @stride_fifos_src: stride for source device FIFOs
+ * @stride_fifos_dst: stride for destination device FIFOs
+ * @words_per_fifo: copy number of words one time for one FIFO
*/
struct sdma_channel {
struct virt_dma_chan vc;
@@ -451,6 +461,9 @@ struct sdma_channel {
bool is_ram_script;
unsigned int n_fifos_src;
unsigned int n_fifos_dst;
+ unsigned int stride_fifos_src;
+ unsigned int stride_fifos_dst;
+ unsigned int words_per_fifo;
bool sw_done;
};
@@ -1240,17 +1253,29 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
{
unsigned int n_fifos;
+ unsigned int stride_fifos;
+ unsigned int words_per_fifo;
if (sdmac->sw_done)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
- if (sdmac->direction == DMA_DEV_TO_MEM)
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
n_fifos = sdmac->n_fifos_src;
- else
+ stride_fifos = sdmac->stride_fifos_src;
+ } else {
n_fifos = sdmac->n_fifos_dst;
+ stride_fifos = sdmac->stride_fifos_dst;
+ }
+
+ words_per_fifo = sdmac->words_per_fifo;
sdmac->watermark_level |=
FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
+ if (words_per_fifo)
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
}
static int sdma_config_channel(struct dma_chan *chan)
@@ -1764,6 +1789,9 @@ static int sdma_config(struct dma_chan *chan,
}
sdmac->n_fifos_src = sdmacfg->n_fifos_src;
sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
+ sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
+ sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
+ sdmac->words_per_fifo = sdmacfg->words_per_fifo;
sdmac->sw_done = sdmacfg->sw_done;
}
@@ -2183,8 +2211,8 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
- sdma);
+ ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
+ dev_name(&pdev->dev), sdma);
if (ret)
goto err_irq;
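To make the new encoding concrete, here is a small illustration of how the multi-FIFO fields pack into the watermark level, mirroring sdma_set_watermarklevel_for_sais() above; the values (4 FIFOs, stride 1, 2 words per FIFO) are invented for the example.

	/* illustrative only; uses the SDMA_WATERMARK_LEVEL_* masks defined above */
	u32 wml = 0;

	wml |= FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, 4);		/* bits 15:12 */
	wml |= FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, 1);		/* bits 19:16 */
	wml |= FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, 2 - 1);	/* bits 31:28 */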
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index f8847c48ba03..9ae92b8940ef 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -373,7 +373,7 @@ static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
/*
* free child CVD after completion.
- * the parent CVD would be freeed with desc_free by user.
+ * the parent CVD would be freed with desc_free by user.
*/
if (cvd->parent != cvd)
kfree(cvd);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 9ebd9231f62f..f7717c44b887 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -138,7 +138,7 @@ struct mtk_hsdma_vdesc {
/**
* struct mtk_hsdma_cb - This is the struct holding extra info required for RX
- * ring to know what relevant VD the the PD is being
+ * ring to know what relevant VD the PD is being
* mapped to.
* @vd: Pointer to the relevant VD.
* @flag: Flag indicating what action should be taken when VD
@@ -761,7 +761,7 @@ static void mtk_hsdma_free_active_desc(struct dma_chan *c)
/*
* Once issue_synchronize is being set, which means once the hardware
* consumes all descriptors for the channel in the ring, the
- * synchronization must be be notified immediately it is completed.
+ * synchronization must be notified immediately it is completed.
*/
spin_lock(&hvc->vc.lock);
if (!list_empty(&hvc->desc_hw_processing)) {
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f10b29034da1..f629ef6fd3c2 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -313,7 +313,7 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
- /* assign coookie */
+ /* assign cookie */
spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 1f0bbaed4643..95a462a1f511 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -193,7 +193,7 @@ struct owl_dma_pchan {
/**
* struct owl_dma_pchan - Wrapper for DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
* @cfg: slave configuration for this channel
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 8e14c72d03f0..f6ed7e889781 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -202,7 +202,7 @@ struct s3c24xx_dma_phy {
* struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
* @id: the id of the channel
* @name: name of the channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phy: the physical channel utilized by this channel, if there is one
* @runtime_addr: address for RX/TX according to the runtime config
* @at: active transaction on this channel
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index db5a4ef76077..4f8b8498c5c6 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->desc && !chan->desc->in_use) {
- spin_unlock_irqrestore(&chan->lock, flags);
- return chan->desc;
- }
-
- spin_unlock_irqrestore(&chan->lock, flags);
desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
@@ -111,7 +101,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
spin_lock_irqsave(&chan->vchan.lock, iflags);
- chan->desc = desc;
sf_pdma_fill_desc(desc, dest, src, len);
spin_unlock_irqrestore(&chan->vchan.lock, iflags);
@@ -170,11 +159,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
unsigned long flags;
u64 residue = 0;
struct sf_pdma_desc *desc;
- struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *tx = NULL;
spin_lock_irqsave(&chan->vchan.lock, flags);
- tx = &chan->desc->vdesc.tx;
+ list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
+ if (vd->tx.cookie == cookie)
+ tx = &vd->tx;
+
+ if (!tx)
+ goto out;
+
if (cookie == tx->chan->completed_cookie)
goto out;
@@ -241,6 +236,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
writel(v, regs->ctrl);
}
+static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
+{
+ struct virt_dma_chan *vchan = &chan->vchan;
+ struct virt_dma_desc *vdesc;
+
+ if (list_empty(&vchan->desc_issued))
+ return NULL;
+
+ vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
+
+ return container_of(vdesc, struct sf_pdma_desc, vdesc);
+}
+
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc = chan->desc;
@@ -268,8 +276,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
+ /* vchan_issue_pending has made a check that desc is not NULL */
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
sf_pdma_xfer_desc(chan);
+ }
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
@@ -298,6 +309,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&chan->vchan.lock, flags);
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
+
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
+ if (chan->desc)
+ sf_pdma_xfer_desc(chan);
+
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
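
The sf-pdma rework above replaces a single cached chan->desc, reused across prep calls, with the standard virt-dma lists: issue_pending takes the oldest entry from desc_issued, and the done tasklet chains straight into the next one. From a client's point of view this is what now works (a minimal sketch; dst1/src1/dst2/src2/len are placeholder dma_addr_t/size_t values, while the dmaengine calls are the real API; previously the second prep simply overwrote chan->desc):

    /* Sketch: queue two memcpy transactions before kicking the channel. */
    struct dma_async_tx_descriptor *tx1, *tx2;

    tx1 = dmaengine_prep_dma_memcpy(chan, dst1, src1, len, DMA_CTRL_ACK);
    tx2 = dmaengine_prep_dma_memcpy(chan, dst2, src2, len, DMA_CTRL_ACK);
    dmaengine_submit(tx1);
    dmaengine_submit(tx2);
    dma_async_issue_pending(chan); /* starts tx1; tx2 follows from the tasklet */
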
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index ee2872e7d64c..476847a4916b 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -630,6 +631,21 @@ static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
*/
}
+static void rz_dmac_device_synchronize(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat;
+ int ret;
+
+ ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
+ 100, 100000, false, channel, CHSTAT, 1);
+ if (ret < 0)
+ dev_warn(dmac->dev, "DMA Timeout");
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+}
+
/*
* -----------------------------------------------------------------------------
* IRQ handling
@@ -909,6 +925,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine->device_config = rz_dmac_config;
engine->device_terminate_all = rz_dmac_terminate_all;
engine->device_issue_pending = rz_dmac_issue_pending;
+ engine->device_synchronize = rz_dmac_device_synchronize;
engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
dma_set_max_seg_size(engine->dev, U32_MAX);
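
device_synchronize must not return while the channel is still enabled, so the new callback busy-waits on CHSTAT with read_poll_timeout() from <linux/iopoll.h>: re-read every 100 us, give up after 100 ms. Conceptually the macro unrolls to something like the sketch below (hypothetical function names; the ktime and usleep_range helpers are real):

    #include <linux/ktime.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Sketch: poll until the enable bit clears or ~100 ms elapse. */
    static int wait_channel_disabled(u32 (*read_fn)(void *ctx), void *ctx,
                                     u32 en_bit)
    {
        ktime_t deadline = ktime_add_us(ktime_get(), 100000);

        for (;;) {
            if (!(read_fn(ctx) & en_bit))
                return 0;
            if (ktime_after(ktime_get(), deadline))
                return -ETIMEDOUT;
            usleep_range(100, 200);
        }
    }
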
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 2138b80435ab..474d3ba8ec9f 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1237,11 +1237,8 @@ static int sprd_dma_remove(struct platform_device *pdev)
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
- int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- return ret;
+ pm_runtime_get_sync(&pdev->dev);
/* explicitly free the irq */
if (sdev->irq > 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e1827393143f..f093e08c23b1 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1970,7 +1970,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = dma40_memcpy_conf_phy;
- /* Generate interrrupt at end of transfer or relink. */
+ /* Generate interrupt at end of transfer or relink. */
d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
/* Generate interrupt on error. */
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index caf0cce8f528..b11927ed4367 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1328,12 +1328,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
return IRQ_NONE;
}
id = __ffs(status);
-
chan = &dmadev->chan[id];
- if (!chan) {
- dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
- return IRQ_NONE;
- }
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 93f1645ae928..f291b1b4db32 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -122,6 +123,15 @@
SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+/*
+ * Normal DMA supports individual transfers (segments) up to 128k.
+ * Dedicated DMA supports transfers up to 16M. We can only report
+ * one size limit, so we have to use the smaller value.
+ */
+#define SUN4I_NDMA_MAX_SEG_SIZE SZ_128K
+#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
+#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -155,7 +165,8 @@ struct sun4i_dma_contract {
struct virt_dma_desc vd;
struct list_head demands;
struct list_head completed_demands;
- int is_cyclic;
+ bool is_cyclic : 1;
+ bool use_half_int : 1;
};
struct sun4i_dma_dev {
@@ -372,7 +383,7 @@ static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
if (promise) {
vchan->contract = contract;
vchan->pchan = pchan;
- set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+ set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
configure_pchan(pchan, promise);
}
@@ -735,12 +746,21 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
*
* Which requires half the engine programming for the same
* functionality.
+ *
+ * This only works if two periods fit in a single promise. That will
+ * always be the case for dedicated DMA, where the hardware has a much
+ * larger maximum transfer size than advertised to clients.
*/
- nr_periods = DIV_ROUND_UP(len / period_len, 2);
+ if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
+ period_len *= 2;
+ contract->use_half_int = 1;
+ }
+
+ nr_periods = DIV_ROUND_UP(len, period_len);
for (i = 0; i < nr_periods; i++) {
/* Calculate the offset in the buffer and the length needed */
- offset = i * period_len * 2;
- plength = min((len - offset), (period_len * 2));
+ offset = i * period_len;
+ plength = min((len - offset), period_len);
if (dir == DMA_MEM_TO_DEV)
src = buf + offset;
else
@@ -1149,6 +1169,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
+ dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);
+
dma_cap_zero(priv->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
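
The cyclic-prep rework above decouples the half-interrupt trick from the period math: a promise may cover two client periods only when the doubled length still fits the engine, and the interrupt mode follows the new use_half_int flag instead of is_cyclic. A worked example for normal DMA (SUN4I_NDMA_MAX_SEG_SIZE = 128 KiB, so the cutoff is 64 KiB):

    /* Sketch: len = 64 KiB of ring buffer, client period_len = 8 KiB.
     * 8 KiB <= 128 KiB / 2, so two periods share one promise:
     *   period_len *= 2       ->  16 KiB per promise
     *   use_half_int = 1      ->  half-done IRQ keeps the 8 KiB callback
     *   nr_periods = DIV_ROUND_UP(64K, 16K) = 4 promises (instead of 8)
     * A 96 KiB period would fail the test on normal DMA, so each promise
     * then carries exactly one period and only end-of-transfer IRQs fire.
     */
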
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 05cd451f541d..fa9bda4a2bc6 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -157,8 +157,8 @@
* If any burst is in flight and DMA paused then this is the time to complete
* on-flight burst and update DMA status register.
*/
-#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
-#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
+#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 10
+#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 5000 /* 5 msec */
/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
@@ -432,6 +432,17 @@ static int tegra_dma_device_resume(struct dma_chan *dc)
return 0;
}
+static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
+{
+ /* Return 0 irrespective of PAUSE status.
+ * This is useful to recover channels that can exit out of flush
+ * state when the channel is disabled.
+ */
+
+ tegra_dma_pause(tdc);
+ return 0;
+}
+
static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
u32 csr, status;
@@ -1292,6 +1303,14 @@ static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
.terminate = tegra_dma_pause,
};
+static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = true,
+ .terminate = tegra_dma_pause_noerr,
+};
+
static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra186-gpcdma",
@@ -1300,6 +1319,9 @@ static const struct of_device_id tegra_dma_of_match[] = {
.compatible = "nvidia,tegra194-gpcdma",
.data = &tegra194_dma_chip_data,
}, {
+ .compatible = "nvidia,tegra234-gpcdma",
+ .data = &tegra234_dma_chip_data,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
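
tegra234 reuses the tegra194 pause hardware, but its channels can sit in flush state, where the pause handshake legitimately fails yet the channel is still recoverable by disabling it; hence the new .terminate callback that swallows the pause result. The per-SoC hook is reached through the usual match-data pattern (a sketch; the struct and callback names mirror the hunk above, the surrounding code is illustrative):

    /* Sketch: chip data selects the terminate behaviour per compatible. */
    const struct tegra_dma_chip_data *cdata =
        of_device_get_match_data(&pdev->dev);

    /* ... later, on terminate_all: */
    err = cdata->terminate(tdc); /* tegra234: always 0 (pause_noerr) */
    if (err)
        return err;
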
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 4c4172a4d271..a488c2250623 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -112,6 +112,11 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0x4a40, 0),
+ PSIL_SA2UL(0x4a41, 0),
+ PSIL_SA2UL(0x4a42, 0),
+ PSIL_SA2UL(0x4a43, 0),
/* CPSW0 */
PSIL_ETHERNET(0x7000),
/* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
@@ -144,6 +149,9 @@ static struct psil_ep j721s2_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j721s2_dst_ep_map[] = {
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0xca40, 1),
+ PSIL_SA2UL(0xca41, 1),
/* CPSW0 */
PSIL_ETHERNET(0xf000),
PSIL_ETHERNET(0xf001),
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index cd62bbb50e8b..6276934d4d2b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2128,126 +2128,6 @@ error:
}
/**
- * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
- * @dchan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
- struct dma_chan *dchan, struct scatterlist *dst_sg,
- unsigned int dst_sg_len, struct scatterlist *src_sg,
- unsigned int src_sg_len, unsigned long flags)
-{
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_cdma_tx_segment *segment, *prev = NULL;
- struct xilinx_cdma_desc_hw *hw;
- size_t len, dst_avail, src_avail;
- dma_addr_t dma_dst, dma_src;
-
- if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
- return NULL;
-
- if (unlikely(!dst_sg || !src_sg))
- return NULL;
-
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
-
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
- /*
- * loop until there is either no more source or no more destination
- * scatterlist entry
- */
- while (true) {
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, chan->xdev->max_buffer_len);
- if (len == 0)
- goto fetch;
-
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_cdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
-
- dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
- dst_avail;
- dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
- src_avail;
- hw = &segment->hw;
- hw->control = len;
- hw->src_addr = dma_src;
- hw->dest_addr = dma_dst;
- if (chan->ext_addr) {
- hw->src_addr_msb = upper_32_bits(dma_src);
- hw->dest_addr_msb = upper_32_bits(dma_dst);
- }
-
- if (prev) {
- prev->hw.next_desc = segment->phys;
- if (chan->ext_addr)
- prev->hw.next_desc_msb =
- upper_32_bits(segment->phys);
- }
-
- prev = segment;
- dst_avail -= len;
- src_avail -= len;
- list_add_tail(&segment->node, &desc->segments);
-
-fetch:
- /* Fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
- if (dst_sg_len == 0)
- break;
- dst_sg = sg_next(dst_sg);
- if (dst_sg == NULL)
- break;
- dst_sg_len--;
- dst_avail = sg_dma_len(dst_sg);
- }
- /* Fetch the next src scatterlist entry */
- if (src_avail == 0) {
- if (src_sg_len == 0)
- break;
- src_sg = sg_next(src_sg);
- if (src_sg == NULL)
- break;
- src_sg_len--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- if (list_empty(&desc->segments)) {
- dev_err(chan->xdev->dev,
- "%s: Zero-size SG transfer requested\n", __func__);
- goto error;
- }
-
- /* Link the last hardware descriptor with the first. */
- segment = list_first_entry(&desc->segments,
- struct xilinx_cdma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
-
- return &desc->async_tx;
-
-error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
-}
-
-/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3240,9 +3120,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
- dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
- xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
/* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b0f4948b00a5..84dc5240a807 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -376,7 +376,7 @@ static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
if (ret < 0)
goto done;
} else {
- strlcpy(kern_buff, "No testcase executed",
+ strscpy(kern_buff, "No testcase executed",
XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
}
@@ -1652,10 +1652,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dpdma_hw_init(xdev);
xdev->irq = platform_get_irq(pdev, 0);
- if (xdev->irq < 0) {
- dev_err(xdev->dev, "failed to get platform irq\n");
+ if (xdev->irq < 0)
return xdev->irq;
- }
ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
dev_name(xdev->dev), xdev);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d3e2477948c8..17562cf1fe97 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -263,6 +263,7 @@ config EDAC_I10NM
config EDAC_PND2
tristate "Intel Pondicherry2"
depends on PCI && X86_64 && X86_MCE_INTEL
+ select P2SB if X86
help
Support for error detection and correction on the Intel
Pondicherry2 Integrated Memory Controller. This SoC IP is
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 5bf92298554d..e50d7928bf8f 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -24,6 +24,8 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "edac_module.h"
#include "mpc85xx_edac.h"
#include "fsl_ddr_edac.h"
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index c94ca1f790c4..a20b299f1202 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -28,6 +28,8 @@
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
+#include <linux/platform_data/x86/p2sb.h>
+
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
@@ -232,42 +234,14 @@ static u64 get_mem_ctrl_hub_base_addr(void)
return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
-static u64 get_sideband_reg_base_addr(void)
-{
- struct pci_dev *pdev;
- u32 hi, lo;
- u8 hidden;
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
- if (pdev) {
- /* Unhide the P2SB device, if it's hidden */
- pci_read_config_byte(pdev, 0xe1, &hidden);
- if (hidden)
- pci_write_config_byte(pdev, 0xe1, 0);
-
- pci_read_config_dword(pdev, 0x10, &lo);
- pci_read_config_dword(pdev, 0x14, &hi);
- lo &= 0xfffffff0;
-
- /* Hide the P2SB device, if it was hidden before */
- if (hidden)
- pci_write_config_byte(pdev, 0xe1, hidden);
-
- pci_dev_put(pdev);
- return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
- } else {
- return 0xfd000000;
- }
-}
-
#define DNV_MCHBAR_SIZE 0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
- char *base;
- u64 addr;
- unsigned long size;
+ void __iomem *base;
+ struct resource r;
+ int ret;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -279,26 +253,30 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
} else {
/* MMIO via memory controller hub base address */
if (op == 0 && port == 0x4c) {
- addr = get_mem_ctrl_hub_base_addr();
- if (!addr)
+ memset(&r, 0, sizeof(r));
+
+ r.start = get_mem_ctrl_hub_base_addr();
+ if (!r.start)
return -ENODEV;
- size = DNV_MCHBAR_SIZE;
+ r.end = r.start + DNV_MCHBAR_SIZE - 1;
} else {
/* MMIO via sideband register base address */
- addr = get_sideband_reg_base_addr();
- if (!addr)
- return -ENODEV;
- addr += (port << 16);
- size = DNV_SB_PORT_SIZE;
+ ret = p2sb_bar(NULL, 0, &r);
+ if (ret)
+ return ret;
+
+ r.start += (port << 16);
+ r.end = r.start + DNV_SB_PORT_SIZE - 1;
}
- base = ioremap((resource_size_t)addr, size);
+ base = ioremap(r.start, resource_size(&r));
if (!base)
return -ENODEV;
if (sz == 8)
- *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
- *(u32 *)data = *(u32 *)(base + off);
+ *(u64 *)data = readq(base + off);
+ else
+ *(u32 *)data = readl(base + off);
iounmap(base);
}
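
The deleted get_sideband_reg_base_addr() duplicated the unhide-read-rehide dance against the hidden P2SB PCI device; the driver now calls the shared p2sb_bar() helper instead (hence the new `select P2SB` in Kconfig), which performs that sequence once and hands back the BAR as a struct resource. Minimal usage sketch, assuming the in-tree signature int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem):

    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_data/x86/p2sb.h>

    /* Sketch: map one 64 KiB sideband port window off the P2SB BAR. */
    static void __iomem *map_sb_port(unsigned int port)
    {
        struct resource r;

        if (p2sb_bar(NULL, 0, &r))  /* NULL bus, devfn 0: default P2SB */
            return NULL;

        r.start += port << 16;      /* one DNV_SB_PORT_SIZE window per port */
        return ioremap(r.start, 0x10000);
    }
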
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 6793f6d799e7..0bc670778c99 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/types.h>
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 7dad6f57d970..81cc3d0f6eec 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -2725,6 +2725,9 @@ void cs_dsp_stop(struct cs_dsp *dsp)
mutex_lock(&dsp->pwr_lock);
+ if (dsp->client_ops->pre_stop)
+ dsp->client_ops->pre_stop(dsp);
+
dsp->running = false;
if (dsp->ops->stop_core)
@@ -3177,6 +3180,110 @@ static const struct cs_dsp_ops cs_dsp_halo_ops = {
.stop_core = cs_dsp_halo_stop_core,
};
+/**
+ * cs_dsp_chunk_write() - Format data to a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to write
+ * @val: Value to write
+ *
+ * This function sequentially writes values into the format required for DSP
+ * memory; it handles both inserting the padding bytes and converting to
+ * big endian. Note that data is only committed to the chunk when a whole
+ * DSP word's worth of data is available.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
+{
+ int nwrite, i;
+
+ nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
+
+ ch->cache <<= nwrite;
+ ch->cache |= val >> (nbits - nwrite);
+ ch->cachebits += nwrite;
+ nbits -= nwrite;
+
+ if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache &= 0xFFFFFF;
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ *ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
+
+ ch->bytes += sizeof(ch->cache);
+ ch->cachebits = 0;
+ }
+
+ if (nbits)
+ return cs_dsp_chunk_write(ch, nbits, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_write);
+
+/**
+ * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * As cs_dsp_chunk_write only writes data when a whole DSP word is ready to
+ * be written out, some data may remain in the cache; this function pads
+ * that data with zeros up to a whole DSP word and writes it out.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
+{
+ if (!ch->cachebits)
+ return 0;
+
+ return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_flush);
+
+/**
+ * cs_dsp_chunk_read() - Parse data from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to read
+ *
+ * This function sequentially reads values from a DSP memory formatted buffer;
+ * it handles both removing the padding bytes and converting from big endian.
+ *
+ * Return: A negative number is returned on error, otherwise the read value.
+ */
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
+{
+ int nread, i;
+ u32 result;
+
+ if (!ch->cachebits) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache = 0;
+ ch->cachebits = CS_DSP_DATA_WORD_BITS;
+
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ ch->cache |= *ch->data++;
+
+ ch->bytes += sizeof(ch->cache);
+ }
+
+ nread = min(ch->cachebits, nbits);
+ nbits -= nread;
+
+ result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread);
+ ch->cache <<= nread;
+ ch->cachebits -= nread;
+
+ if (nbits)
+ result = (result << nbits) | cs_dsp_chunk_read(ch, nbits);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_read);
+
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f191a1f901ac..0eb6b617f709 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
- dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+ dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
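
In the SMBIOS 3 entry point the version lives in three consecutive bytes at offset 7 (major, minor, docrev); byte 6 is the entry-point length. The old expression read four bytes starting at the length byte and then masked it off, so the two forms are bit-for-bit equal, but the new one reads only what it uses:

    #include <asm/unaligned.h>

    /* Worked equivalence: buf[6..9] = { 0x18, 0x03, 0x04, 0x00 } */
    u32 old_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF; /* 0x00030400 */
    u32 new_ver = get_unaligned_be24(buf + 7);            /* 0x00030400, 3.4.0 */
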
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 9e85e58d1f27..b450ebf95977 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -8,6 +8,7 @@
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/unaligned.h>
#include "efistub.h"
@@ -29,7 +30,7 @@ static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
- const fdt32_t *prop;
+ const void *prop;
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
@@ -40,10 +41,16 @@ static int get_boot_hartid_from_fdt(void)
return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
- if (!prop || len != sizeof(u32))
+ if (!prop)
+ return -EINVAL;
+
+ if (len == sizeof(u32))
+ hartid = (unsigned long) fdt32_to_cpu(*(fdt32_t *)prop);
+ else if (len == sizeof(u64))
+ hartid = (unsigned long) fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop));
+ else
return -EINVAL;
- hartid = fdt32_to_cpu(*prop);
return 0;
}
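
Firmware may emit boot-hartid as either one or two FDT cells, and FDT properties are only guaranteed 4-byte alignment, so a u64 value can straddle an 8-byte boundary; that is why the 64-bit branch goes through __get_unaligned_t (hence the new <asm/unaligned.h> include) rather than dereferencing an fdt64_t pointer. The dispatch, isolated as a sketch:

    /* Sketch: size-dispatch a hartid property (prop/len from fdt_getprop). */
    static int parse_hartid(const void *prop, int len, unsigned long *hartid)
    {
        if (len == sizeof(u32))
            *hartid = fdt32_to_cpu(*(const fdt32_t *)prop);
        else if (len == sizeof(u64))
            *hartid = fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop));
        else
            return -EINVAL;
        return 0;
    }
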
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index cb255a99170c..3c071f814455 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };
+
/*
* mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
*
@@ -72,7 +74,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
struct mtk_adsp_ipc *adsp_ipc;
struct mtk_adsp_chan *adsp_chan;
struct mbox_client *cl;
- char *chan_name;
int ret;
int i, j;
@@ -83,12 +84,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
- chan_name = kasprintf(GFP_KERNEL, "mbox%d", i);
- if (!chan_name) {
- ret = -ENOMEM;
- goto out;
- }
-
adsp_chan = &adsp_ipc->chans[i];
cl = &adsp_chan->cl;
cl->dev = dev->parent;
@@ -99,17 +94,20 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->ipc = adsp_ipc;
adsp_chan->idx = i;
- adsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
ret = PTR_ERR(adsp_chan->ch);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %d ret %d\n",
- i, ret);
- goto out_free;
- }
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ adsp_mbox_ch_names[i], ret);
+
+ for (j = 0; j < i; j++) {
+ adsp_chan = &adsp_ipc->chans[j];
+ mbox_free_channel(adsp_chan->ch);
+ }
- dev_dbg(dev, "request mbox chan %s\n", chan_name);
- kfree(chan_name);
+ return ret;
+ }
}
adsp_ipc->dev = dev;
@@ -117,16 +115,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
dev_dbg(dev, "MTK ADSP IPC initialized\n");
return 0;
-
-out_free:
- kfree(chan_name);
-out:
- for (j = 0; j < i; j++) {
- adsp_chan = &adsp_ipc->chans[j];
- mbox_free_channel(adsp_chan->ch);
- }
-
- return ret;
}
static int mtk_adsp_ipc_remove(struct platform_device *pdev)
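
Two things change in probe above: the mailbox channel names become the fixed "rx"/"tx" table (so there is no kasprintf allocation to track), and error recovery moves inline, freeing exactly the channels [0, i) that were already requested instead of jumping to shared labels at the end of the function. The acquire-or-unwind idiom, reduced to a sketch (the mbox_* calls are the real API; everything else is illustrative):

    /* Sketch: request n named channels, unwinding on the first failure. */
    for (i = 0; i < n; i++) {
        ch[i] = mbox_request_channel_byname(cl, names[i]);
        if (IS_ERR(ch[i])) {
            ret = PTR_ERR(ch[i]);
            while (i--)             /* free only what we acquired */
                mbox_free_channel(ch[i]);
            return ret;
        }
    }
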
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b01961999ced..0642f579196f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -544,6 +544,7 @@ config GPIO_SAMA5D2_PIOBU
tristate "SAMA5D2 PIOBU GPIO support"
depends on MFD_SYSCON
depends on OF_GPIO
+ depends on ARCH_AT91 || COMPILE_TEST
select GPIO_SYSCON
help
Say yes here to use the PIOBU pins as GPIOs.
@@ -690,12 +691,6 @@ config GPIO_VISCONTI
help
Say yes here to support GPIO on Toshiba Visconti.
-config GPIO_VR41XX
- tristate "NEC VR4100 series General-purpose I/O Unit support"
- depends on CPU_VR41XX
- help
- Say yes here to support the NEC VR4100 series General-purpose I/O Unit.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on (X86 || COMPILE_TEST) && PCI
@@ -829,11 +824,24 @@ endmenu
menu "Port-mapped I/O GPIO drivers"
depends on X86 # Unconditional I/O space access
+config GPIO_I8255
+ tristate
+ help
+ Enables support for the i8255 interface library functions. The i8255
+ interface library provides functions to facilitate communication with
+ interfaces compatible with the venerable Intel 8255 Programmable
+ Peripheral Interface (PPI). The Intel 8255 PPI chip was first released
+ in the early 1970s but compatible interfaces are nowadays typically
+ found embedded in larger VLSI processing chips and FPGA components.
+
+ If built as a module its name will be gpio-i8255.
+
config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
depends on PC104
select ISA_BUS_API
select GPIOLIB_IRQCHIP
+ select GPIO_I8255
help
Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
104-DIO-24E). The base port addresses for the devices may be
@@ -857,6 +865,7 @@ config GPIO_104_IDI_48
depends on PC104
select ISA_BUS_API
select GPIOLIB_IRQCHIP
+ select GPIO_I8255
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
104-IDI-48AC, 104-IDI-48B, 104-IDI-48BC). The base port addresses for
@@ -877,6 +886,7 @@ config GPIO_GPIO_MM
tristate "Diamond Systems GPIO-MM GPIO support"
depends on PC104
select ISA_BUS_API
+ select GPIO_I8255
help
Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 14352f6dfe8e..a0985d30f51b 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
+obj-$(CONFIG_GPIO_I8255) += gpio-i8255.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
@@ -169,7 +170,6 @@ obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VIRTIO) += gpio-virtio.o
obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o
-obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index f118ad9bcd33..a41551870759 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -6,8 +6,7 @@
* This driver supports the following ACCES devices: 104-DIO-48E and
* 104-DIO-24E.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -20,6 +19,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define DIO48E_EXTENT 16
#define MAX_NUM_DIO48E max_num_isa_dev(DIO48E_EXTENT)
@@ -33,34 +37,54 @@ static unsigned int irq[MAX_NUM_DIO48E];
module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
+#define DIO48E_NUM_PPI 2
+
+/**
+ * struct dio48e_reg - device register structure
+ * @ppi: Programmable Peripheral Interface groups
+ * @enable_buffer: Enable/Disable Buffer groups
+ * @unused1: Unused
+ * @enable_interrupt: Write: Enable Interrupt
+ * Read: Disable Interrupt
+ * @unused2: Unused
+ * @enable_counter: Write: Enable Counter/Timer Addressing
+ * Read: Disable Counter/Timer Addressing
+ * @unused3: Unused
+ * @clear_interrupt: Clear Interrupt
+ */
+struct dio48e_reg {
+ struct i8255 ppi[DIO48E_NUM_PPI];
+ u8 enable_buffer[DIO48E_NUM_PPI];
+ u8 unused1;
+ u8 enable_interrupt;
+ u8 unused2;
+ u8 enable_counter;
+ u8 unused3;
+ u8 clear_interrupt;
+};
+
/**
* struct dio48e_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @io_state: bit I/O state (whether bit is set to input or output)
- * @out_state: output bits state
- * @control: Control registers state
- * @lock: synchronization lock to prevent I/O race conditions
- * @base: base port address of the GPIO device
- * @irq_mask: I/O bits affected by interrupts
+ * @chip: instance of the gpio_chip
+ * @ppi_state: PPI device states
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @reg: I/O address offset for the device registers
+ * @irq_mask: I/O bits affected by interrupts
*/
struct dio48e_gpio {
struct gpio_chip chip;
- unsigned char io_state[6];
- unsigned char out_state[6];
- unsigned char control[2];
+ struct i8255_state ppi_state[DIO48E_NUM_PPI];
raw_spinlock_t lock;
- void __iomem *base;
+ struct dio48e_reg __iomem *reg;
unsigned char irq_mask;
};
static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- if (dio48egpio->io_state[port] & mask)
- return GPIO_LINE_DIRECTION_IN;
+ if (i8255_get_direction(dio48egpio->ppi_state, offset))
+ return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
@@ -68,38 +92,9 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset
static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- void __iomem *const control_addr = dio48egpio->base + 3 + control_port * 4;
- unsigned long flags;
- unsigned int control;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- dio48egpio->io_state[io_port] |= 0xF0;
- dio48egpio->control[control_port] |= BIT(3);
- } else {
- dio48egpio->io_state[io_port] |= 0x0F;
- dio48egpio->control[control_port] |= BIT(0);
- }
- } else {
- dio48egpio->io_state[io_port] |= 0xFF;
- if (io_port == 0 || io_port == 3)
- dio48egpio->control[control_port] |= BIT(4);
- else
- dio48egpio->control[control_port] |= BIT(1);
- }
-
- control = BIT(7) | dio48egpio->control[control_port];
- iowrite8(control, control_addr);
- control &= ~BIT(7);
- iowrite8(control, control_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_direction_input(dio48egpio->reg->ppi, dio48egpio->ppi_state,
+ offset);
return 0;
}
@@ -108,48 +103,9 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int off
int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- const unsigned int mask = BIT(offset % 8);
- void __iomem *const control_addr = dio48egpio->base + 3 + control_port * 4;
- const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
- unsigned long flags;
- unsigned int control;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- dio48egpio->io_state[io_port] &= 0x0F;
- dio48egpio->control[control_port] &= ~BIT(3);
- } else {
- dio48egpio->io_state[io_port] &= 0xF0;
- dio48egpio->control[control_port] &= ~BIT(0);
- }
- } else {
- dio48egpio->io_state[io_port] &= 0x00;
- if (io_port == 0 || io_port == 3)
- dio48egpio->control[control_port] &= ~BIT(4);
- else
- dio48egpio->control[control_port] &= ~BIT(1);
- }
-
- if (value)
- dio48egpio->out_state[io_port] |= mask;
- else
- dio48egpio->out_state[io_port] &= ~mask;
- control = BIT(7) | dio48egpio->control[control_port];
- iowrite8(control, control_addr);
-
- iowrite8(dio48egpio->out_state[io_port], dio48egpio->base + out_port);
-
- control &= ~BIT(7);
- iowrite8(control, control_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_direction_output(dio48egpio->reg->ppi, dio48egpio->ppi_state,
+ offset, value);
return 0;
}
@@ -157,47 +113,16 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int off
static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int in_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- unsigned int port_state;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* ensure that GPIO is set for input */
- if (!(dio48egpio->io_state[port] & mask)) {
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
- return -EINVAL;
- }
-
- port_state = ioread8(dio48egpio->base + in_port);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-
- return !!(port_state & mask);
+ return i8255_get(dio48egpio->reg->ppi, offset);
}
-static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
-
static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *port_addr;
- unsigned long port_state;
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = dio48egpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(dio48egpio->reg->ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -205,49 +130,17 @@ static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- if (value)
- dio48egpio->out_state[port] |= mask;
- else
- dio48egpio->out_state[port] &= ~mask;
-
- iowrite8(dio48egpio->out_state[port], dio48egpio->base + out_port);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_set(dio48egpio->reg->ppi, dio48egpio->ppi_state, offset, value);
}
static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- void __iomem *port_addr;
- unsigned long bitmask;
- unsigned long flags;
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
- port_addr = dio48egpio->base + ports[index];
-
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* update output state data and set device gpio register */
- dio48egpio->out_state[index] &= ~gpio_mask;
- dio48egpio->out_state[index] |= bitmask;
- iowrite8(dio48egpio->out_state[index], port_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
- }
+ i8255_set_multiple(dio48egpio->reg->ppi, dio48egpio->ppi_state, mask,
+ bits, chip->ngpio);
}
static void dio48e_irq_ack(struct irq_data *data)
@@ -274,7 +167,7 @@ static void dio48e_irq_mask(struct irq_data *data)
if (!dio48egpio->irq_mask)
/* disable interrupts */
- ioread8(dio48egpio->base + 0xB);
+ ioread8(&dio48egpio->reg->enable_interrupt);
raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
@@ -294,8 +187,8 @@ static void dio48e_irq_unmask(struct irq_data *data)
if (!dio48egpio->irq_mask) {
/* enable interrupts */
- iowrite8(0x00, dio48egpio->base + 0xF);
- iowrite8(0x00, dio48egpio->base + 0xB);
+ iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
+ iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
if (offset == 19)
@@ -341,7 +234,7 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
raw_spin_lock(&dio48egpio->lock);
- iowrite8(0x00, dio48egpio->base + 0xF);
+ iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
raw_spin_unlock(&dio48egpio->lock);
@@ -373,11 +266,26 @@ static int dio48e_irq_init_hw(struct gpio_chip *gc)
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- ioread8(dio48egpio->base + 0xB);
+ ioread8(&dio48egpio->reg->enable_interrupt);
return 0;
}
+static void dio48e_init_ppi(struct i8255 __iomem *const ppi,
+ struct i8255_state *const ppi_state)
+{
+ const unsigned long ngpio = 24;
+ const unsigned long mask = GENMASK(ngpio - 1, 0);
+ const unsigned long bits = 0;
+ unsigned long i;
+
+ /* Initialize all GPIO to output 0 */
+ for (i = 0; i < DIO48E_NUM_PPI; i++) {
+ i8255_mode0_output(&ppi[i]);
+ i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
+ }
+}
+
static int dio48e_probe(struct device *dev, unsigned int id)
{
struct dio48e_gpio *dio48egpio;
@@ -395,8 +303,8 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- dio48egpio->base = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
- if (!dio48egpio->base)
+ dio48egpio->reg = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
+ if (!dio48egpio->reg)
return -ENOMEM;
dio48egpio->chip.label = name;
@@ -425,17 +333,8 @@ static int dio48e_probe(struct device *dev, unsigned int id)
raw_spin_lock_init(&dio48egpio->lock);
- /* initialize all GPIO as output */
- iowrite8(0x80, dio48egpio->base + 3);
- iowrite8(0x00, dio48egpio->base);
- iowrite8(0x00, dio48egpio->base + 1);
- iowrite8(0x00, dio48egpio->base + 2);
- iowrite8(0x00, dio48egpio->base + 3);
- iowrite8(0x80, dio48egpio->base + 7);
- iowrite8(0x00, dio48egpio->base + 4);
- iowrite8(0x00, dio48egpio->base + 5);
- iowrite8(0x00, dio48egpio->base + 6);
- iowrite8(0x00, dio48egpio->base + 7);
+ i8255_state_init(dio48egpio->ppi_state, DIO48E_NUM_PPI);
+ dio48e_init_ppi(dio48egpio->reg->ppi, dio48egpio->ppi_state);
err = devm_gpiochip_add_data(dev, &dio48egpio->chip, dio48egpio);
if (err) {
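
The conversion above swaps magic port offsets for a struct overlay plus the shared i8255 library: struct dio48e_reg mirrors the card's 16-byte I/O map, so &dio48egpio->reg->enable_interrupt replaces base + 0xB. Assuming struct i8255 is the classic 4-byte PPI block (three data ports plus a control register — an assumption about gpio-i8255.h), the member offsets reproduce the old constants exactly:

    /* Sketch: offsets implied by struct dio48e_reg, matching the magic
     * numbers in the removed code (0xB enable_interrupt, 0xF clear).
     * Assumes sizeof(struct i8255) == 4.
     */
    BUILD_BUG_ON(offsetof(struct dio48e_reg, ppi) != 0x0);
    BUILD_BUG_ON(offsetof(struct dio48e_reg, enable_buffer) != 0x8);
    BUILD_BUG_ON(offsetof(struct dio48e_reg, enable_interrupt) != 0xB);
    BUILD_BUG_ON(offsetof(struct dio48e_reg, enable_counter) != 0xD);
    BUILD_BUG_ON(offsetof(struct dio48e_reg, clear_interrupt) != 0xF);
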
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 9521ece3ebef..40be76efeed7 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -6,8 +6,7 @@
* This driver supports the following ACCES devices: 104-IDI-48A,
* 104-IDI-48AC, 104-IDI-48B, and 104-IDI-48BC.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -20,6 +19,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define IDI_48_EXTENT 8
#define MAX_NUM_IDI_48 max_num_isa_dev(IDI_48_EXTENT)
@@ -34,72 +38,61 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
/**
+ * struct idi_48_reg - device register structure
+ * @port0: Port 0 Inputs
+ * @unused: Unused
+ * @port1: Port 1 Inputs
+ * @irq: Read: IRQ Status Register/IRQ Clear
+ * Write: IRQ Enable/Disable
+ */
+struct idi_48_reg {
+ u8 port0[3];
+ u8 unused;
+ u8 port1[3];
+ u8 irq;
+};
+
+/**
* struct idi_48_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @lock: synchronization lock to prevent I/O race conditions
- * @ack_lock: synchronization lock to prevent IRQ handler race conditions
* @irq_mask: input bits affected by interrupts
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
* @cos_enb: Change-Of-State IRQ enable boundaries mask
*/
struct idi_48_gpio {
struct gpio_chip chip;
- raw_spinlock_t lock;
- spinlock_t ack_lock;
+ spinlock_t lock;
unsigned char irq_mask[6];
- void __iomem *base;
+ struct idi_48_reg __iomem *reg;
unsigned char cos_enb;
};
-static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
return GPIO_LINE_DIRECTION_IN;
}
-static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
return 0;
}
-static int idi_48_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- unsigned i;
- static const unsigned int register_offset[6] = { 0, 1, 2, 4, 5, 6 };
- void __iomem *port_addr;
- unsigned mask;
-
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- port_addr = idi48gpio->base + register_offset[i / 8];
- mask = BIT(offset - i);
-
- return !!(ioread8(port_addr) & mask);
- }
+ void __iomem *const ppi = idi48gpio->reg;
- /* The following line should never execute since offset < 48 */
- return 0;
+ return i8255_get(ppi, offset);
}
static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
- void __iomem *port_addr;
- unsigned long port_state;
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
+ void __iomem *const ppi = idi48gpio->reg;
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = idi48gpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -112,67 +105,56 @@ static void idi_48_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned offset = irqd_to_hwirq(data);
- unsigned i;
- unsigned mask;
- unsigned boundary;
+ const unsigned int offset = irqd_to_hwirq(data);
+ const unsigned long boundary = offset / 8;
+ const unsigned long mask = BIT(offset % 8);
unsigned long flags;
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- mask = BIT(offset - i);
- boundary = i / 8;
-
- idi48gpio->irq_mask[boundary] &= ~mask;
+ spin_lock_irqsave(&idi48gpio->lock, flags);
- if (!idi48gpio->irq_mask[boundary]) {
- idi48gpio->cos_enb &= ~BIT(boundary);
+ idi48gpio->irq_mask[boundary] &= ~mask;
- raw_spin_lock_irqsave(&idi48gpio->lock, flags);
+ /* Exit early if there are still input lines with IRQ unmasked */
+ if (idi48gpio->irq_mask[boundary])
+ goto exit;
- iowrite8(idi48gpio->cos_enb, idi48gpio->base + 7);
+ idi48gpio->cos_enb &= ~BIT(boundary);
- raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
- }
+ iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
- return;
- }
+exit:
+ spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
static void idi_48_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned offset = irqd_to_hwirq(data);
- unsigned i;
- unsigned mask;
- unsigned boundary;
- unsigned prev_irq_mask;
+ const unsigned int offset = irqd_to_hwirq(data);
+ const unsigned long boundary = offset / 8;
+ const unsigned long mask = BIT(offset % 8);
+ unsigned int prev_irq_mask;
unsigned long flags;
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- mask = BIT(offset - i);
- boundary = i / 8;
- prev_irq_mask = idi48gpio->irq_mask[boundary];
+ spin_lock_irqsave(&idi48gpio->lock, flags);
- idi48gpio->irq_mask[boundary] |= mask;
+ prev_irq_mask = idi48gpio->irq_mask[boundary];
- if (!prev_irq_mask) {
- idi48gpio->cos_enb |= BIT(boundary);
+ idi48gpio->irq_mask[boundary] |= mask;
- raw_spin_lock_irqsave(&idi48gpio->lock, flags);
+ /* Exit early if IRQ was already unmasked for this boundary */
+ if (prev_irq_mask)
+ goto exit;
- iowrite8(idi48gpio->cos_enb, idi48gpio->base + 7);
+ idi48gpio->cos_enb |= BIT(boundary);
- raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
- }
+ iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
- return;
- }
+exit:
+ spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
-static int idi_48_irq_set_type(struct irq_data *data, unsigned flow_type)
+static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
/* The only valid irq types are none and both-edges */
if (flow_type != IRQ_TYPE_NONE &&
@@ -200,17 +182,13 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
unsigned long gpio;
struct gpio_chip *const chip = &idi48gpio->chip;
- spin_lock(&idi48gpio->ack_lock);
-
- raw_spin_lock(&idi48gpio->lock);
-
- cos_status = ioread8(idi48gpio->base + 7);
+ spin_lock(&idi48gpio->lock);
- raw_spin_unlock(&idi48gpio->lock);
+ cos_status = ioread8(&idi48gpio->reg->irq);
/* IRQ Status (bit 6) is active low (0 = IRQ generated by device) */
if (cos_status & BIT(6)) {
- spin_unlock(&idi48gpio->ack_lock);
+ spin_unlock(&idi48gpio->lock);
return IRQ_NONE;
}
@@ -228,7 +206,7 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
}
}
- spin_unlock(&idi48gpio->ack_lock);
+ spin_unlock(&idi48gpio->lock);
return IRQ_HANDLED;
}
@@ -250,8 +228,8 @@ static int idi_48_irq_init_hw(struct gpio_chip *gc)
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- iowrite8(0, idi48gpio->base + 7);
- ioread8(idi48gpio->base + 7);
+ iowrite8(0, &idi48gpio->reg->irq);
+ ioread8(&idi48gpio->reg->irq);
return 0;
}
@@ -273,8 +251,8 @@ static int idi_48_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- idi48gpio->base = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
- if (!idi48gpio->base)
+ idi48gpio->reg = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
+ if (!idi48gpio->reg)
return -ENOMEM;
idi48gpio->chip.label = name;
@@ -298,8 +276,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
girq->handler = handle_edge_irq;
girq->init_hw = idi_48_irq_init_hw;
- raw_spin_lock_init(&idi48gpio->lock);
- spin_lock_init(&idi48gpio->ack_lock);
+ spin_lock_init(&idi48gpio->lock);
err = devm_gpiochip_add_data(dev, &idi48gpio->chip, idi48gpio);
if (err) {
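
The locking change above collapses ack_lock (which serialized the IRQ handler against mask/unmask) and the raw register lock into one spinlock, now held across both the soft-state update and the iowrite8 that publishes it, closing the window where cos_enb could go stale between the two old locks. The resulting pattern, reduced to a sketch with a hypothetical cut-down state struct:

    /* Sketch: bookkeeping and the register write share one critical section. */
    struct demo_gpio {
        spinlock_t lock;
        u8 cos_enb;
        void __iomem *irq_reg;
    };

    static void demo_mask_boundary(struct demo_gpio *g, unsigned int boundary)
    {
        unsigned long flags;

        spin_lock_irqsave(&g->lock, flags);
        g->cos_enb &= ~BIT(boundary);
        iowrite8(g->cos_enb, g->irq_reg);
        spin_unlock_irqrestore(&g->lock, flags);
    }
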
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 45f7ad8573e1..65a5f581d981 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -6,7 +6,7 @@
* This driver supports the following ACCES devices: 104-IDIO-16,
* 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, and 104-IDO-8.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
#define IDIO_16_EXTENT 8
#define MAX_NUM_IDIO_16 max_num_isa_dev(IDIO_16_EXTENT)
@@ -33,18 +34,41 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
/**
+ * struct idio_16_reg - device registers structure
+ * @out0_7: Read: N/A
+ * Write: FET Drive Outputs 0-7
+ * @in0_7: Read: Isolated Inputs 0-7
+ * Write: Clear Interrupt
+ * @irq_ctl: Read: Enable IRQ
+ * Write: Disable IRQ
+ * @unused: N/A
+ * @out8_15: Read: N/A
+ * Write: FET Drive Outputs 8-15
+ * @in8_15: Read: Isolated Inputs 8-15
+ * Write: N/A
+ */
+struct idio_16_reg {
+ u8 out0_7;
+ u8 in0_7;
+ u8 irq_ctl;
+ u8 unused;
+ u8 out8_15;
+ u8 in8_15;
+};
+
+/**
* struct idio_16_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
* @out_state: output bits state
*/
struct idio_16_gpio {
struct gpio_chip chip;
raw_spinlock_t lock;
unsigned long irq_mask;
- void __iomem *base;
+ struct idio_16_reg __iomem *reg;
unsigned int out_state;
};
@@ -79,9 +103,9 @@ static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset)
return -EINVAL;
if (offset < 24)
- return !!(ioread8(idio16gpio->base + 1) & mask);
+ return !!(ioread8(&idio16gpio->reg->in0_7) & mask);
- return !!(ioread8(idio16gpio->base + 5) & (mask>>8));
+ return !!(ioread8(&idio16gpio->reg->in8_15) & (mask>>8));
}
static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
@@ -91,9 +115,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
*bits = 0;
if (*mask & GENMASK(23, 16))
- *bits |= (unsigned long)ioread8(idio16gpio->base + 1) << 16;
+ *bits |= (unsigned long)ioread8(&idio16gpio->reg->in0_7) << 16;
if (*mask & GENMASK(31, 24))
- *bits |= (unsigned long)ioread8(idio16gpio->base + 5) << 24;
+ *bits |= (unsigned long)ioread8(&idio16gpio->reg->in8_15) << 24;
return 0;
}
@@ -116,9 +140,9 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
idio16gpio->out_state &= ~mask;
if (offset > 7)
- iowrite8(idio16gpio->out_state >> 8, idio16gpio->base + 4);
+ iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15);
else
- iowrite8(idio16gpio->out_state, idio16gpio->base);
+ iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -135,9 +159,9 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
idio16gpio->out_state |= *mask & *bits;
if (*mask & 0xFF)
- iowrite8(idio16gpio->out_state, idio16gpio->base);
+ iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7);
if ((*mask >> 8) & 0xFF)
- iowrite8(idio16gpio->out_state >> 8, idio16gpio->base + 4);
+ iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -158,7 +182,7 @@ static void idio_16_irq_mask(struct irq_data *data)
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
- iowrite8(0, idio16gpio->base + 2);
+ iowrite8(0, &idio16gpio->reg->irq_ctl);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -177,7 +201,7 @@ static void idio_16_irq_unmask(struct irq_data *data)
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
- ioread8(idio16gpio->base + 2);
+ ioread8(&idio16gpio->reg->irq_ctl);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -212,7 +236,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
raw_spin_lock(&idio16gpio->lock);
- iowrite8(0, idio16gpio->base + 1);
+ iowrite8(0, &idio16gpio->reg->in0_7);
raw_spin_unlock(&idio16gpio->lock);
@@ -232,8 +256,8 @@ static int idio_16_irq_init_hw(struct gpio_chip *gc)
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- iowrite8(0, idio16gpio->base + 2);
- iowrite8(0, idio16gpio->base + 1);
+ iowrite8(0, &idio16gpio->reg->irq_ctl);
+ iowrite8(0, &idio16gpio->reg->in0_7);
return 0;
}
@@ -255,8 +279,8 @@ static int idio_16_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- idio16gpio->base = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
- if (!idio16gpio->base)
+ idio16gpio->reg = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
+ if (!idio16gpio->reg)
return -ENOMEM;
idio16gpio->chip.label = name;
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 173e06758e6c..0464f1ecd20d 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -5,15 +5,17 @@
* Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
*/
+#include <linux/bits.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
-#define MMIO_74XX_DIR_IN (0 << 8)
-#define MMIO_74XX_DIR_OUT (1 << 8)
-#define MMIO_74XX_BIT_CNT(x) ((x) & 0xff)
+#define MMIO_74XX_DIR_IN BIT(8)
+#define MMIO_74XX_DIR_OUT BIT(9)
+#define MMIO_74XX_BIT_CNT(x) ((x) & GENMASK(7, 0))
struct mmio_74xx_gpio_priv {
struct gpio_chip gc;
@@ -87,7 +89,10 @@ static int mmio_74xx_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- return (priv->flags & MMIO_74XX_DIR_OUT) ? -ENOTSUPP : 0;
+ if (priv->flags & MMIO_74XX_DIR_IN)
+ return 0;
+
+ return -ENOTSUPP;
}
static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -112,7 +117,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ priv->flags = (uintptr_t)device_get_match_data(&pdev->dev);
dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index cc349d4e4973..a6439e3daff0 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -6,8 +6,9 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -485,22 +486,17 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
return 0;
}
-static int adnp_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adnp_i2c_probe(struct i2c_client *client)
{
- struct device_node *np = client->dev.of_node;
+ struct device *dev = &client->dev;
struct adnp *adnp;
u32 num_gpios;
int err;
- err = of_property_read_u32(np, "nr-gpios", &num_gpios);
+ err = device_property_read_u32(dev, "nr-gpios", &num_gpios);
if (err < 0)
return err;
- client->irq = irq_of_parse_and_map(np, 0);
- if (!client->irq)
- return -EPROBE_DEFER;
-
adnp = devm_kzalloc(&client->dev, sizeof(*adnp), GFP_KERNEL);
if (!adnp)
return -ENOMEM;
@@ -508,8 +504,7 @@ static int adnp_i2c_probe(struct i2c_client *client,
mutex_init(&adnp->i2c_lock);
adnp->client = client;
- err = adnp_gpio_setup(adnp, num_gpios,
- of_property_read_bool(np, "interrupt-controller"));
+ err = adnp_gpio_setup(adnp, num_gpios, device_property_read_bool(dev, "interrupt-controller"));
if (err)
return err;
@@ -535,7 +530,7 @@ static struct i2c_driver adnp_i2c_driver = {
.name = "gpio-adnp",
.of_match_table = adnp_of_match,
},
- .probe = adnp_i2c_probe,
+ .probe_new = adnp_i2c_probe,
.id_table = adnp_i2c_id,
};
module_i2c_driver(adnp_i2c_driver);
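
Besides moving from OF-only helpers to generic device properties (device_property_read_u32() also works under ACPI), adnp adopts the probe_new() callback, which drops the i2c_device_id argument the driver never used; the explicit irq_of_parse_and_map() goes too, since the I2C core resolves client->irq from the firmware node before calling probe. The signature change in isolation:

    /* Sketch: legacy .probe vs the .probe_new style adopted above. */
    static int legacy_probe(struct i2c_client *client,
                            const struct i2c_device_id *id); /* .probe */

    static int modern_probe(struct i2c_client *client);      /* .probe_new */
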
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index e388e75103f4..d49f12560cde 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -6,20 +6,18 @@
* Copyright 2009-2010 Analog Devices Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/platform_data/adp5588.h>
-#define DRV_NAME "adp5588-gpio"
-
/*
* Early pre 4.0 Silicon required to delay readout by at least 25ms,
* since the Event Counter Register updated 25ms after the interrupt
@@ -422,23 +420,21 @@ static int adp5588_gpio_remove(struct i2c_client *client)
}
static const struct i2c_device_id adp5588_gpio_id[] = {
- {DRV_NAME, 0},
+ { "adp5588-gpio" },
{}
};
MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
-#ifdef CONFIG_OF
static const struct of_device_id adp5588_gpio_of_id[] = {
- { .compatible = "adi," DRV_NAME, },
- {},
+ { .compatible = "adi,adp5588-gpio" },
+ {}
};
MODULE_DEVICE_TABLE(of, adp5588_gpio_of_id);
-#endif
static struct i2c_driver adp5588_gpio_driver = {
.driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(adp5588_gpio_of_id),
+ .name = "adp5588-gpio",
+ .of_match_table = adp5588_gpio_of_id,
},
.probe_new = adp5588_gpio_probe,
.remove = adp5588_gpio_remove,
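Dropping the CONFIG_OF guard and of_match_ptr() is safe because struct of_device_id comes from <linux/mod_devicetable.h> regardless of CONFIG_OF, and an of_match_table that never matches is simply inert; referencing the table unconditionally also avoids an unused-variable warning in !CONFIG_OF builds. The resulting shape, sketched with illustrative names:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id example_of_id[] = {
	{ .compatible = "vendor,example" },
	{}
};
MODULE_DEVICE_TABLE(of, example_of_id);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example",
		/* No of_match_ptr(): the table exists even without CONFIG_OF. */
		.of_match_table = example_of_id,
	},
};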
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 85429dd6b3b6..c55b35da61a0 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -375,12 +375,7 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
{
struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev);
struct brcmstb_gpio_bank *bank;
- int offset, ret = 0, virq;
-
- if (!priv) {
- dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
- return -EFAULT;
- }
+ int offset, virq;
if (priv->parent_irq > 0)
irq_set_chained_handler_and_data(priv->parent_irq, NULL, NULL);
@@ -401,7 +396,7 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
list_for_each_entry(bank, &priv->bank_list, node)
gpiochip_remove(&bank->gc);
- return ret;
+ return 0;
}
static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index f960587f86a3..59c4c48d8296 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -22,6 +22,7 @@
#include <linux/platform_data/gpio-davinci.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
#include <asm-generic/gpio.h>
@@ -62,6 +63,8 @@ struct davinci_gpio_controller {
void __iomem *regs[MAX_REGS_BANKS];
int gpio_unbanked;
int irqs[MAX_INT_PER_BANK];
+ struct davinci_gpio_regs context[MAX_REGS_BANKS];
+ u32 binten_context;
};
static inline u32 __gpio_mask(unsigned gpio)
@@ -622,6 +625,85 @@ done:
return 0;
}
+static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+ chips->binten_context = readl_relaxed(base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ context->dir = readl_relaxed(&g->dir);
+ context->set_data = readl_relaxed(&g->set_data);
+ context->set_rising = readl_relaxed(&g->set_rising);
+ context->set_falling = readl_relaxed(&g->set_falling);
+ }
+
+ /* Clear Bank interrupt enable bit */
+ writel_relaxed(0, base + BINTEN);
+
+ /* Clear all interrupt status registers */
+ writel_relaxed(GENMASK(31, 0), &g->intstat);
+}
+
+static void davinci_gpio_restore_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+
+ if (readl_relaxed(base + BINTEN) != chips->binten_context)
+ writel_relaxed(chips->binten_context, base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ if (readl_relaxed(&g->dir) != context->dir)
+ writel_relaxed(context->dir, &g->dir);
+ if (readl_relaxed(&g->set_data) != context->set_data)
+ writel_relaxed(context->set_data, &g->set_data);
+ if (readl_relaxed(&g->set_rising) != context->set_rising)
+ writel_relaxed(context->set_rising, &g->set_rising);
+ if (readl_relaxed(&g->set_falling) != context->set_falling)
+ writel_relaxed(context->set_falling, &g->set_falling);
+ }
+}
+
+static int davinci_gpio_suspend(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_save_context(chips, nbank);
+
+ return 0;
+}
+
+static int davinci_gpio_resume(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_restore_context(chips, nbank);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(davinci_gpio_dev_pm_ops, davinci_gpio_suspend,
+ davinci_gpio_resume);
+
static const struct of_device_id davinci_gpio_ids[] = {
{ .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip},
{ .compatible = "ti,am654-gpio", keystone_gpio_get_irq_chip},
@@ -634,6 +716,7 @@ static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
.driver = {
.name = "davinci_gpio",
+ .pm = pm_sleep_ptr(&davinci_gpio_dev_pm_ops),
.of_match_table = of_match_ptr(davinci_gpio_ids),
},
};
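Two details are worth noting here. The restore path reads each register back and writes only on mismatch, sparing redundant MMIO writes after a context-preserving suspend. And the callbacks are wired up with the modern idiom for optional sleep support: DEFINE_SIMPLE_DEV_PM_OPS() always defines the ops, while pm_sleep_ptr() yields NULL when CONFIG_PM_SLEEP is off, letting the compiler discard the callbacks without any #ifdef. A minimal sketch of that idiom, detached from this driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* Save controller context here. */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* Restore controller context here. */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
				example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		/* Evaluates to NULL when CONFIG_PM_SLEEP=n. */
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};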
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 097a06463d01..2689671b6b01 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -6,8 +6,6 @@
* This driver supports the following Diamond Systems devices: GPIO-MM and
* GPIO-MM-12.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -17,7 +15,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define GPIOMM_EXTENT 8
#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)
@@ -27,32 +28,26 @@ static unsigned int num_gpiomm;
module_param_hw_array(base, uint, ioport, &num_gpiomm, 0);
MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");
+#define GPIOMM_NUM_PPI 2
+
/**
* struct gpiomm_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @io_state: bit I/O state (whether bit is set to input or output)
- * @out_state: output bits state
- * @control: Control registers state
- * @lock: synchronization lock to prevent I/O race conditions
- * @base: base port address of the GPIO device
+ * @chip: instance of the gpio_chip
+ * @ppi_state: Programmable Peripheral Interface group states
+ * @ppi: Programmable Peripheral Interface groups
*/
struct gpiomm_gpio {
struct gpio_chip chip;
- unsigned char io_state[6];
- unsigned char out_state[6];
- unsigned char control[2];
- spinlock_t lock;
- void __iomem *base;
+ struct i8255_state ppi_state[GPIOMM_NUM_PPI];
+ struct i8255 __iomem *ppi;
};
static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- if (gpiommgpio->io_state[port] & mask)
+ if (i8255_get_direction(gpiommgpio->ppi_state, offset))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -62,35 +57,8 @@ static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- unsigned long flags;
- unsigned int control;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- gpiommgpio->io_state[io_port] |= 0xF0;
- gpiommgpio->control[control_port] |= BIT(3);
- } else {
- gpiommgpio->io_state[io_port] |= 0x0F;
- gpiommgpio->control[control_port] |= BIT(0);
- }
- } else {
- gpiommgpio->io_state[io_port] |= 0xFF;
- if (io_port == 0 || io_port == 3)
- gpiommgpio->control[control_port] |= BIT(4);
- else
- gpiommgpio->control[control_port] |= BIT(1);
- }
- control = BIT(7) | gpiommgpio->control[control_port];
- iowrite8(control, gpiommgpio->base + 3 + control_port*4);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_direction_input(gpiommgpio->ppi, gpiommgpio->ppi_state, offset);
return 0;
}
@@ -99,44 +67,9 @@ static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
- unsigned long flags;
- unsigned int control;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- gpiommgpio->io_state[io_port] &= 0x0F;
- gpiommgpio->control[control_port] &= ~BIT(3);
- } else {
- gpiommgpio->io_state[io_port] &= 0xF0;
- gpiommgpio->control[control_port] &= ~BIT(0);
- }
- } else {
- gpiommgpio->io_state[io_port] &= 0x00;
- if (io_port == 0 || io_port == 3)
- gpiommgpio->control[control_port] &= ~BIT(4);
- else
- gpiommgpio->control[control_port] &= ~BIT(1);
- }
-
- if (value)
- gpiommgpio->out_state[io_port] |= mask;
- else
- gpiommgpio->out_state[io_port] &= ~mask;
-
- control = BIT(7) | gpiommgpio->control[control_port];
- iowrite8(control, gpiommgpio->base + 3 + control_port*4);
- iowrite8(gpiommgpio->out_state[io_port], gpiommgpio->base + out_port);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_direction_output(gpiommgpio->ppi, gpiommgpio->ppi_state, offset,
+ value);
return 0;
}
@@ -144,47 +77,16 @@ static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int in_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- unsigned int port_state;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* ensure that GPIO is set for input */
- if (!(gpiommgpio->io_state[port] & mask)) {
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- return -EINVAL;
- }
-
- port_state = ioread8(gpiommgpio->base + in_port);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- return !!(port_state & mask);
+ return i8255_get(gpiommgpio->ppi, offset);
}
-static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
-
static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *port_addr;
- unsigned long port_state;
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = gpiommgpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(gpiommgpio->ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -193,49 +95,17 @@ static void gpiomm_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- if (value)
- gpiommgpio->out_state[port] |= mask;
- else
- gpiommgpio->out_state[port] &= ~mask;
-
- iowrite8(gpiommgpio->out_state[port], gpiommgpio->base + out_port);
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_set(gpiommgpio->ppi, gpiommgpio->ppi_state, offset, value);
}
static void gpiomm_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- void __iomem *port_addr;
- unsigned long bitmask;
- unsigned long flags;
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
- port_addr = gpiommgpio->base + ports[index];
-
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
- /* update output state data and set device gpio register */
- gpiommgpio->out_state[index] &= ~gpio_mask;
- gpiommgpio->out_state[index] |= bitmask;
- iowrite8(gpiommgpio->out_state[index], port_addr);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- }
+ i8255_set_multiple(gpiommgpio->ppi, gpiommgpio->ppi_state, mask, bits,
+ chip->ngpio);
}
#define GPIOMM_NGPIO 48
@@ -250,6 +120,21 @@ static const char *gpiomm_names[GPIOMM_NGPIO] = {
"Port 2C2", "Port 2C3", "Port 2C4", "Port 2C5", "Port 2C6", "Port 2C7",
};
+static void gpiomm_init_dio(struct i8255 __iomem *const ppi,
+ struct i8255_state *const ppi_state)
+{
+ const unsigned long ngpio = 24;
+ const unsigned long mask = GENMASK(ngpio - 1, 0);
+ const unsigned long bits = 0;
+ unsigned long i;
+
+ /* Initialize all GPIO to output 0 */
+ for (i = 0; i < GPIOMM_NUM_PPI; i++) {
+ i8255_mode0_output(&ppi[i]);
+ i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
+ }
+}
+
static int gpiomm_probe(struct device *dev, unsigned int id)
{
struct gpiomm_gpio *gpiommgpio;
@@ -266,8 +151,8 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- gpiommgpio->base = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
- if (!gpiommgpio->base)
+ gpiommgpio->ppi = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
+ if (!gpiommgpio->ppi)
return -ENOMEM;
gpiommgpio->chip.label = name;
@@ -284,7 +169,8 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
gpiommgpio->chip.set = gpiomm_gpio_set;
gpiommgpio->chip.set_multiple = gpiomm_gpio_set_multiple;
- spin_lock_init(&gpiommgpio->lock);
+ i8255_state_init(gpiommgpio->ppi_state, GPIOMM_NUM_PPI);
+ gpiomm_init_dio(gpiommgpio->ppi, gpiommgpio->ppi_state);
err = devm_gpiochip_add_data(dev, &gpiommgpio->chip, gpiommgpio);
if (err) {
@@ -292,16 +178,6 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return err;
}
- /* initialize all GPIO as output */
- iowrite8(0x80, gpiommgpio->base + 3);
- iowrite8(0x00, gpiommgpio->base);
- iowrite8(0x00, gpiommgpio->base + 1);
- iowrite8(0x00, gpiommgpio->base + 2);
- iowrite8(0x80, gpiommgpio->base + 7);
- iowrite8(0x00, gpiommgpio->base + 4);
- iowrite8(0x00, gpiommgpio->base + 5);
- iowrite8(0x00, gpiommgpio->base + 6);
-
return 0;
}
diff --git a/drivers/gpio/gpio-i8255.c b/drivers/gpio/gpio-i8255.c
new file mode 100644
index 000000000000..9b97db418df1
--- /dev/null
+++ b/drivers/gpio/gpio-i8255.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel 8255 Programmable Peripheral Interface
+ * Copyright (C) 2022 William Breathitt Gray
+ */
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+#define I8255_CONTROL_PORTC_LOWER_DIRECTION BIT(0)
+#define I8255_CONTROL_PORTB_DIRECTION BIT(1)
+#define I8255_CONTROL_PORTC_UPPER_DIRECTION BIT(3)
+#define I8255_CONTROL_PORTA_DIRECTION BIT(4)
+#define I8255_CONTROL_MODE_SET BIT(7)
+#define I8255_PORTA 0
+#define I8255_PORTB 1
+#define I8255_PORTC 2
+
+static int i8255_get_port(struct i8255 __iomem *const ppi,
+ const unsigned long io_port, const unsigned long mask)
+{
+ const unsigned long bank = io_port / 3;
+ const unsigned long ppi_port = io_port % 3;
+
+ return ioread8(&ppi[bank].port[ppi_port]) & mask;
+}
+
+static u8 i8255_direction_mask(const unsigned long offset)
+{
+ const unsigned long port_offset = offset % 8;
+ const unsigned long io_port = offset / 8;
+ const unsigned long ppi_port = io_port % 3;
+
+ switch (ppi_port) {
+ case I8255_PORTA:
+ return I8255_CONTROL_PORTA_DIRECTION;
+ case I8255_PORTB:
+ return I8255_CONTROL_PORTB_DIRECTION;
+ case I8255_PORTC:
+ /* Port C can be configured by nibble */
+ if (port_offset >= 4)
+ return I8255_CONTROL_PORTC_UPPER_DIRECTION;
+ return I8255_CONTROL_PORTC_LOWER_DIRECTION;
+ default:
+ /* Should never reach this path */
+ return 0;
+ }
+}
+
+static void i8255_set_port(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long io_port,
+ const unsigned long mask, const unsigned long bits)
+{
+ const unsigned long bank = io_port / 3;
+ const unsigned long ppi_port = io_port % 3;
+ unsigned long flags;
+ unsigned long out_state;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ out_state = ioread8(&ppi[bank].port[ppi_port]);
+ out_state = (out_state & ~mask) | (bits & mask);
+ iowrite8(out_state, &ppi[bank].port[ppi_port]);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+}
+
+/**
+ * i8255_direction_input - configure signal offset as input
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: device states of the respective PPI banks
+ * @offset: signal offset to configure as input
+ *
+ * Configures a signal @offset as input for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks. The @state control_state
+ * values are updated to reflect the new configuration.
+ */
+void i8255_direction_input(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ state[bank].control_state |= I8255_CONTROL_MODE_SET;
+ state[bank].control_state |= i8255_direction_mask(offset);
+
+ iowrite8(state[bank].control_state, &ppi[bank].control);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_direction_input, I8255);
+
+/**
+ * i8255_direction_output - configure signal offset as output
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: device states of the respective PPI banks
+ * @offset: signal offset to configure as output
+ * @value: signal value to output
+ *
+ * Configures a signal @offset as output for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks and sets the respective signal
+ * output to the desired @value. The @state control_state values are updated to
+ * reflect the new configuration.
+ */
+void i8255_direction_output(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long offset,
+ const unsigned long value)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ state[bank].control_state |= I8255_CONTROL_MODE_SET;
+ state[bank].control_state &= ~i8255_direction_mask(offset);
+
+ iowrite8(state[bank].control_state, &ppi[bank].control);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+
+ i8255_set(ppi, state, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_direction_output, I8255);
+
+/**
+ * i8255_get - get signal value at signal offset
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @offset: offset of signal to get
+ *
+ * Returns the signal value (0=low, 1=high) for the signal at @offset for the
+ * respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+int i8255_get(struct i8255 __iomem *const ppi, const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long offset_mask = BIT(offset % 8);
+
+ return !!i8255_get_port(ppi, io_port, offset_mask);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get, I8255);
+
+/**
+ * i8255_get_direction - get the I/O direction for a signal offset
+ * @state: device states of the respective PPI banks
+ * @offset: offset of signal to get direction
+ *
+ * Returns the signal direction (0=output, 1=input) for the signal at @offset.
+ */
+int i8255_get_direction(const struct i8255_state *const state,
+ const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+
+ return !!(state[bank].control_state & i8255_direction_mask(offset));
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get_direction, I8255);
+
+/**
+ * i8255_get_multiple - get multiple signal values at multiple signal offsets
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @mask: mask of signals to get
+ * @bits: bitmap to store signal values
+ * @ngpio: number of GPIO signals of the respective PPI banks
+ *
+ * Stores in @bits the values (0=low, 1=high) for the signals defined by @mask
+ * for the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_get_multiple(struct i8255 __iomem *const ppi,
+ const unsigned long *const mask,
+ unsigned long *const bits, const unsigned long ngpio)
+{
+ unsigned long offset;
+ unsigned long port_mask;
+ unsigned long io_port;
+ unsigned long port_state;
+
+ bitmap_zero(bits, ngpio);
+
+ for_each_set_clump8(offset, port_mask, mask, ngpio) {
+ io_port = offset / 8;
+ port_state = i8255_get_port(ppi, io_port, port_mask);
+
+ bitmap_set_value8(bits, port_state, offset);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get_multiple, I8255);
+
+/**
+ * i8255_mode0_output - configure all PPI ports to MODE 0 output mode
+ * @ppi: Intel 8255 Programmable Peripheral Interface bank
+ *
+ * Configures all Intel 8255 Programmable Peripheral Interface (@ppi) ports to
+ * MODE 0 (Basic Input/Output) output mode.
+ */
+void i8255_mode0_output(struct i8255 __iomem *const ppi)
+{
+ iowrite8(I8255_CONTROL_MODE_SET, &ppi->control);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_mode0_output, I8255);
+
+/**
+ * i8255_set - set signal value at signal offset
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: device states of the respective PPI banks
+ * @offset: offset of signal to set
+ * @value: value of signal to set
+ *
+ * Assigns output @value for the signal at @offset for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_set(struct i8255 __iomem *const ppi, struct i8255_state *const state,
+ const unsigned long offset, const unsigned long value)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long port_offset = offset % 8;
+ const unsigned long mask = BIT(port_offset);
+ const unsigned long bits = value << port_offset;
+
+ i8255_set_port(ppi, state, io_port, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_set, I8255);
+
+/**
+ * i8255_set_multiple - set signal values at multiple signal offsets
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: device states of the respective PPI banks
+ * @mask: mask of signals to set
+ * @bits: bitmap of signal output values
+ * @ngpio: number of GPIO signals of the respective PPI banks
+ *
+ * Assigns output values defined by @bits for the signals defined by @mask for
+ * the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_set_multiple(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long *const mask,
+ const unsigned long *const bits,
+ const unsigned long ngpio)
+{
+ unsigned long offset;
+ unsigned long port_mask;
+ unsigned long io_port;
+ unsigned long value;
+
+ for_each_set_clump8(offset, port_mask, mask, ngpio) {
+ io_port = offset / 8;
+ value = bitmap_get_value8(bits, offset);
+ i8255_set_port(ppi, state, io_port, port_mask, value);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(i8255_set_multiple, I8255);
+
+/**
+ * i8255_state_init - initialize i8255_state structure
+ * @state: device states of the respective PPI banks
+ * @nbanks: number of Intel 8255 Programmable Peripheral Interface banks
+ *
+ * Initializes the @state of each Intel 8255 Programmable Peripheral Interface
+ * bank for use in i8255 library functions.
+ */
+void i8255_state_init(struct i8255_state *const state,
+ const unsigned long nbanks)
+{
+ unsigned long bank;
+
+ for (bank = 0; bank < nbanks; bank++)
+ spin_lock_init(&state[bank].lock);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_state_init, I8255);
+
+MODULE_AUTHOR("William Breathitt Gray");
+MODULE_DESCRIPTION("Intel 8255 Programmable Peripheral Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-i8255.h b/drivers/gpio/gpio-i8255.h
new file mode 100644
index 000000000000..d9084aae9446
--- /dev/null
+++ b/drivers/gpio/gpio-i8255.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2022 William Breathitt Gray */
+#ifndef _I8255_H_
+#define _I8255_H_
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * struct i8255 - Intel 8255 register structure
+ * @port: Port A, B, and C
+ * @control: Control register
+ */
+struct i8255 {
+ u8 port[3];
+ u8 control;
+};
+
+/**
+ * struct i8255_state - Intel 8255 state structure
+ * @lock: synchronization lock for accessing device state
+ * @control_state: Control register state
+ */
+struct i8255_state {
+ spinlock_t lock;
+ u8 control_state;
+};
+
+void i8255_direction_input(struct i8255 __iomem *ppi, struct i8255_state *state,
+ unsigned long offset);
+void i8255_direction_output(struct i8255 __iomem *ppi,
+ struct i8255_state *state, unsigned long offset,
+ unsigned long value);
+int i8255_get(struct i8255 __iomem *ppi, unsigned long offset);
+int i8255_get_direction(const struct i8255_state *state, unsigned long offset);
+void i8255_get_multiple(struct i8255 __iomem *ppi, const unsigned long *mask,
+ unsigned long *bits, unsigned long ngpio);
+void i8255_mode0_output(struct i8255 __iomem *const ppi);
+void i8255_set(struct i8255 __iomem *ppi, struct i8255_state *state,
+ unsigned long offset, unsigned long value);
+void i8255_set_multiple(struct i8255 __iomem *ppi, struct i8255_state *state,
+ const unsigned long *mask, const unsigned long *bits,
+ unsigned long ngpio);
+void i8255_state_init(struct i8255_state *const state, unsigned long nbanks);
+
+#endif /* _I8255_H_ */
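Together, gpio-i8255.c and this header let ISA-card drivers share one PPI implementation: the consumer maps its I/O region as an array of struct i8255, keeps a matching array of struct i8255_state (initialized once with i8255_state_init()), and forwards its gpio_chip callbacks to the library; module consumers additionally declare MODULE_IMPORT_NS(I8255), as the GPIO-MM conversion above does. A condensed sketch of the consumer side, with illustrative names:

#include <linux/gpio/driver.h>

#include "gpio-i8255.h"

#define EXAMPLE_NUM_PPI 2	/* two PPI chips, 24 lines each */

struct example_gpio {
	struct gpio_chip chip;
	struct i8255_state ppi_state[EXAMPLE_NUM_PPI];
	struct i8255 __iomem *ppi;
};

static int example_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct example_gpio *const gpio = gpiochip_get_data(chip);

	i8255_direction_input(gpio->ppi, gpio->ppi_state, offset);

	return 0;
}

static int example_get(struct gpio_chip *chip, unsigned int offset)
{
	struct example_gpio *const gpio = gpiochip_get_data(chip);

	return i8255_get(gpio->ppi, offset);
}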
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 8a30fb185aab..79edd5db49d2 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -42,7 +42,7 @@ struct lp3943_gpio {
u16 input_mask; /* 1 = GPIO is input direction, 0 = output */
};
-static int lp3943_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
@@ -54,7 +54,7 @@ static int lp3943_gpio_request(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void lp3943_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void lp3943_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
@@ -72,7 +72,7 @@ static int lp3943_gpio_set_mode(struct lp3943_gpio *lp3943_gpio, u8 offset,
val << mux[offset].shift);
}
-static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
@@ -82,7 +82,7 @@ static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
}
static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio,
- struct gpio_chip *chip, unsigned offset)
+ struct gpio_chip *chip, unsigned int offset)
{
u8 addr, read;
int err;
@@ -107,7 +107,7 @@ static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio,
}
static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio,
- struct gpio_chip *chip, unsigned offset)
+ struct gpio_chip *chip, unsigned int offset)
{
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
const struct lp3943_reg_cfg *mux = lp3943->mux_cfg;
@@ -128,7 +128,7 @@ static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio,
return -EINVAL;
}
-static int lp3943_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
@@ -147,7 +147,7 @@ static int lp3943_gpio_get(struct gpio_chip *chip, unsigned offset)
return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
}
-static void lp3943_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
u8 data;
@@ -160,7 +160,7 @@ static void lp3943_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
lp3943_gpio_set_mode(lp3943_gpio, offset, data);
}
-static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index cb2b2f735c15..ab2a652964ec 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -121,12 +121,14 @@ static int pca9570_probe(struct i2c_client *client)
static const struct i2c_device_id pca9570_id_table[] = {
{ "pca9570", 4 },
+ { "pca9571", 8 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, pca9570_id_table);
static const struct of_device_id pca9570_of_match_table[] = {
{ .compatible = "nxp,pca9570", .data = (void *)4 },
+ { .compatible = "nxp,pca9571", .data = (void *)8 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9570_of_match_table);
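Because both tables carry the line count as match data, probe needs no per-chip branching; it reads the count from whichever entry matched. Sketched, with an illustrative probe body:

#include <linux/i2c.h>
#include <linux/property.h>

static int example_pca_probe(struct i2c_client *client)
{
	/* 4 for pca9570, 8 for pca9571, straight from the matched entry. */
	unsigned int ngpio = (uintptr_t)device_get_match_data(&client->dev);

	return ngpio ? 0 : -ENODEV;
}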
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 3a0bd8795741..ee37ecb615cb 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -37,6 +37,11 @@ struct pch_regs {
u32 reset;
};
+#define PCI_DEVICE_ID_INTEL_EG20T_PCH 0x8803
+#define PCI_DEVICE_ID_ROHM_ML7223m_IOH 0x8014
+#define PCI_DEVICE_ID_ROHM_ML7223n_IOH 0x8043
+#define PCI_DEVICE_ID_ROHM_EG20T_PCH 0x8803
+
enum pch_type_t {
INTEL_EG20T_PCH,
OKISEMI_ML7223m_IOH, /* LAPIS Semiconductor ML7223 IOH PCIe Bus-m */
@@ -357,16 +362,12 @@ static int pch_gpio_probe(struct pci_dev *pdev,
chip->dev = dev;
ret = pcim_enable_device(pdev);
- if (ret) {
- dev_err(dev, "pci_enable_device FAILED");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable PCI device\n");
ret = pcim_iomap_regions(pdev, BIT(1), KBUILD_MODNAME);
- if (ret) {
- dev_err(dev, "pci_request_regions FAILED-%d", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request and map PCI regions\n");
chip->base = pcim_iomap_table(pdev)[1];
chip->ioh = id->driver_data;
@@ -376,10 +377,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
pch_gpio_setup(chip);
ret = devm_gpiochip_add_data(dev, &chip->gpio, chip);
- if (ret) {
- dev_err(dev, "PCH gpio: Failed to register GPIO\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register GPIO\n");
irq_base = devm_irq_alloc_descs(dev, -1, 0,
gpio_pins[chip->ioh], NUMA_NO_NODE);
@@ -396,10 +395,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
ret = devm_request_irq(dev, pdev->irq, pch_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
- if (ret) {
- dev_err(dev, "request_irq failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ\n");
return pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
}
@@ -433,15 +430,11 @@ static int __maybe_unused pch_gpio_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
static const struct pci_device_id pch_gpio_pcidev_id[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803),
- .driver_data = INTEL_EG20T_PCH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014),
- .driver_data = OKISEMI_ML7223m_IOH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043),
- .driver_data = OKISEMI_ML7223n_IOH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803),
- .driver_data = INTEL_EG20T_PCH },
- { 0, }
+ { PCI_DEVICE_DATA(INTEL, EG20T_PCH, INTEL_EG20T_PCH) },
+ { PCI_DEVICE_DATA(ROHM, ML7223m_IOH, OKISEMI_ML7223m_IOH) },
+ { PCI_DEVICE_DATA(ROHM, ML7223n_IOH, OKISEMI_ML7223n_IOH) },
+ { PCI_DEVICE_DATA(ROHM, EG20T_PCH, INTEL_EG20T_PCH) },
+ { }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
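Two idioms meet in these hunks. dev_err_probe() collapses the print-then-return dance into one statement and stays quiet for -EPROBE_DEFER, logging only once probing has definitively failed. And PCI_DEVICE_DATA(ROHM, ML7223m_IOH, ...) expands by token pasting to PCI_VENDOR_ID_ROHM and PCI_DEVICE_ID_ROHM_ML7223m_IOH, which is why the new device-ID macros at the top of the file keep the lowercase m and n. A minimal dev_err_probe() sketch with an illustrative failure:

#include <linux/device.h>

static int example_setup(struct device *dev)
{
	int ret;

	ret = -ENOMEM;	/* stand-in for a real resource acquisition */
	if (ret)
		return dev_err_probe(dev, ret, "Failed to set up resource\n");

	return 0;
}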
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index e342a6dc4c6c..f91e876fd969 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -27,6 +27,7 @@
#define GPIO_TYPE_V1 (0) /* GPIO Version ID reserved */
#define GPIO_TYPE_V2 (0x01000C2B) /* GPIO Version ID 0x01000C2B */
+#define GPIO_TYPE_V2_1 (0x0101157C) /* GPIO Version ID 0x0101157C */
static const struct rockchip_gpio_regs gpio_regs_v1 = {
.port_dr = 0x00,
@@ -664,7 +665,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
id = readl(bank->reg_base + gpio_regs_v2.version_id);
/* If not GPIO v2, default to v1. */
- if (id == GPIO_TYPE_V2) {
+ if (id == GPIO_TYPE_V2 || id == GPIO_TYPE_V2_1) {
bank->gpio_regs = &gpio_regs_v2;
bank->gpio_type = GPIO_TYPE_V2;
bank->db_clk = of_clk_get(bank->of_node, 1);
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index de249726230e..5046e51af8df 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -593,27 +593,13 @@ out:
/* Cannot use as gpio_twl4030_probe() calls us */
static int gpio_twl4030_remove(struct platform_device *pdev)
{
- struct twl4030_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
- int status;
-
- if (pdata && pdata->teardown) {
- status = pdata->teardown(&pdev->dev, priv->gpio_chip.base,
- TWL4030_GPIO_MAX);
- if (status) {
- dev_dbg(&pdev->dev, "teardown --> %d\n", status);
- return status;
- }
- }
gpiochip_remove(&priv->gpio_chip);
- if (is_module())
- return 0;
-
/* REVISIT no support yet for deregistering all the IRQs */
- WARN_ON(1);
- return -EIO;
+ WARN_ON(!is_module());
+ return 0;
}
static const struct of_device_id twl_gpio_match[] = {
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index d2a8644864c3..386e69300332 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -64,34 +64,14 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
ucb->gc.can_sleep = true;
err = devm_gpiochip_add_data(&dev->dev, &ucb->gc, ucb);
- if (err)
- goto err;
-
- if (ucb->gpio_setup)
- err = ucb->gpio_setup(&dev->dev, ucb->gc.ngpio);
err:
return err;
}
-static int ucb1400_gpio_remove(struct platform_device *dev)
-{
- int err = 0;
- struct ucb1400_gpio *ucb = platform_get_drvdata(dev);
-
- if (ucb && ucb->gpio_teardown) {
- err = ucb->gpio_teardown(&dev->dev, ucb->gc.ngpio);
- if (err)
- return err;
- }
-
- return err;
-}
-
static struct platform_driver ucb1400_gpio_driver = {
.probe = ucb1400_gpio_probe,
- .remove = ucb1400_gpio_remove,
.driver = {
.name = "ucb1400_gpio"
},
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
deleted file mode 100644
index 8d09b619c166..000000000000
--- a/drivers/gpio/gpio-vr41xx.c
+++ /dev/null
@@ -1,541 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for NEC VR4100 series General-purpose I/O Unit.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/irq.h>
-#include <asm/vr41xx/vr41xx.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
-MODULE_LICENSE("GPL");
-
-#define GIUIOSELL 0x00
-#define GIUIOSELH 0x02
-#define GIUPIODL 0x04
-#define GIUPIODH 0x06
-#define GIUINTSTATL 0x08
-#define GIUINTSTATH 0x0a
-#define GIUINTENL 0x0c
-#define GIUINTENH 0x0e
-#define GIUINTTYPL 0x10
-#define GIUINTTYPH 0x12
-#define GIUINTALSELL 0x14
-#define GIUINTALSELH 0x16
-#define GIUINTHTSELL 0x18
-#define GIUINTHTSELH 0x1a
-#define GIUPODATL 0x1c
-#define GIUPODATEN 0x1c
-#define GIUPODATH 0x1e
- #define PIOEN0 0x0100
- #define PIOEN1 0x0200
-#define GIUPODAT 0x1e
-#define GIUFEDGEINHL 0x20
-#define GIUFEDGEINHH 0x22
-#define GIUREDGEINHL 0x24
-#define GIUREDGEINHH 0x26
-
-#define GIUUSEUPDN 0x1e0
-#define GIUTERMUPDN 0x1e2
-
-#define GPIO_HAS_PULLUPDOWN_IO 0x0001
-#define GPIO_HAS_OUTPUT_ENABLE 0x0002
-#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
-
-enum {
- GPIO_INPUT,
- GPIO_OUTPUT,
-};
-
-static DEFINE_SPINLOCK(giu_lock);
-static unsigned long giu_flags;
-
-static void __iomem *giu_base;
-static struct gpio_chip vr41xx_gpio_chip;
-
-#define giu_read(offset) readw(giu_base + (offset))
-#define giu_write(offset, value) writew((value), giu_base + (offset))
-
-#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
-#define GIUINT_HIGH_OFFSET 16
-#define GIUINT_HIGH_MAX 32
-
-static inline u16 giu_set(u16 offset, u16 set)
-{
- u16 data;
-
- data = giu_read(offset);
- data |= set;
- giu_write(offset, data);
-
- return data;
-}
-
-static inline u16 giu_clear(u16 offset, u16 clear)
-{
- u16 data;
-
- data = giu_read(offset);
- data &= ~clear;
- giu_write(offset, data);
-
- return data;
-}
-
-static void ack_giuint_low(struct irq_data *d)
-{
- giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static void mask_giuint_low(struct irq_data *d)
-{
- giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static void mask_ack_giuint_low(struct irq_data *d)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(d->irq);
- giu_clear(GIUINTENL, 1 << pin);
- giu_write(GIUINTSTATL, 1 << pin);
-}
-
-static void unmask_giuint_low(struct irq_data *d)
-{
- giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static unsigned int startup_giuint(struct irq_data *data)
-{
- int ret;
-
- ret = gpiochip_lock_as_irq(&vr41xx_gpio_chip, irqd_to_hwirq(data));
- if (ret) {
- dev_err(vr41xx_gpio_chip.parent,
- "unable to lock HW IRQ %lu for IRQ\n",
- data->hwirq);
- return ret;
- }
-
- /* Satisfy the .enable semantics by unmasking the line */
- unmask_giuint_low(data);
- return 0;
-}
-
-static void shutdown_giuint(struct irq_data *data)
-{
- mask_giuint_low(data);
- gpiochip_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
-}
-
-static struct irq_chip giuint_low_irq_chip = {
- .name = "GIUINTL",
- .irq_ack = ack_giuint_low,
- .irq_mask = mask_giuint_low,
- .irq_mask_ack = mask_ack_giuint_low,
- .irq_unmask = unmask_giuint_low,
- .irq_startup = startup_giuint,
- .irq_shutdown = shutdown_giuint,
-};
-
-static void ack_giuint_high(struct irq_data *d)
-{
- giu_write(GIUINTSTATH,
- 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_giuint_high(struct irq_data *d)
-{
- giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_ack_giuint_high(struct irq_data *d)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
- giu_clear(GIUINTENH, 1 << pin);
- giu_write(GIUINTSTATH, 1 << pin);
-}
-
-static void unmask_giuint_high(struct irq_data *d)
-{
- giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static struct irq_chip giuint_high_irq_chip = {
- .name = "GIUINTH",
- .irq_ack = ack_giuint_high,
- .irq_mask = mask_giuint_high,
- .irq_mask_ack = mask_ack_giuint_high,
- .irq_unmask = unmask_giuint_high,
-};
-
-static int giu_get_irq(unsigned int irq)
-{
- u16 pendl, pendh, maskl, maskh;
- int i;
-
- pendl = giu_read(GIUINTSTATL);
- pendh = giu_read(GIUINTSTATH);
- maskl = giu_read(GIUINTENL);
- maskh = giu_read(GIUINTENH);
-
- maskl &= pendl;
- maskh &= pendh;
-
- if (maskl) {
- for (i = 0; i < 16; i++) {
- if (maskl & (1 << i))
- return GIU_IRQ(i);
- }
- } else if (maskh) {
- for (i = 0; i < 16; i++) {
- if (maskh & (1 << i))
- return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
- }
- }
-
- printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
- maskl, pendl, maskh, pendh);
-
- return -EINVAL;
-}
-
-void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
- irq_signal_t signal)
-{
- u16 mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPL, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELL, mask);
- else
- giu_clear(GIUINTHTSELL, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHL, mask);
- giu_clear(GIUREDGEINHL, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- default:
- giu_set(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- }
- }
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPL, mask);
- giu_clear(GIUINTHTSELL, mask);
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPH, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELH, mask);
- else
- giu_clear(GIUINTHTSELH, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHH, mask);
- giu_clear(GIUREDGEINHH, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- default:
- giu_set(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- }
- }
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPH, mask);
- giu_clear(GIUINTHTSELH, mask);
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
-
-void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
-{
- u16 mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELL, mask);
- else
- giu_clear(GIUINTALSELL, mask);
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELH, mask);
- else
- giu_clear(GIUINTALSELH, mask);
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
-
-static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
-{
- u16 offset, mask, reg;
- unsigned long flags;
-
- if (pin >= chip->ngpio)
- return -EINVAL;
-
- if (pin < 16) {
- offset = GIUIOSELL;
- mask = 1 << pin;
- } else if (pin < 32) {
- offset = GIUIOSELH;
- mask = 1 << (pin - 16);
- } else {
- if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
- offset = GIUPODATEN;
- mask = 1 << (pin - 32);
- } else {
- switch (pin) {
- case 48:
- offset = GIUPODATH;
- mask = PIOEN0;
- break;
- case 49:
- offset = GIUPODATH;
- mask = PIOEN1;
- break;
- default:
- return -EINVAL;
- }
- }
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (dir == GPIO_OUTPUT)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-
-static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
-{
- u16 reg, mask;
-
- if (pin >= chip->ngpio)
- return -EINVAL;
-
- if (pin < 16) {
- reg = giu_read(GIUPIODL);
- mask = 1 << pin;
- } else if (pin < 32) {
- reg = giu_read(GIUPIODH);
- mask = 1 << (pin - 16);
- } else if (pin < 48) {
- reg = giu_read(GIUPODATL);
- mask = 1 << (pin - 32);
- } else {
- reg = giu_read(GIUPODATH);
- mask = 1 << (pin - 48);
- }
-
- if (reg & mask)
- return 1;
-
- return 0;
-}
-
-static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
- int value)
-{
- u16 offset, mask, reg;
- unsigned long flags;
-
- if (pin >= chip->ngpio)
- return;
-
- if (pin < 16) {
- offset = GIUPIODL;
- mask = 1 << pin;
- } else if (pin < 32) {
- offset = GIUPIODH;
- mask = 1 << (pin - 16);
- } else if (pin < 48) {
- offset = GIUPODATL;
- mask = 1 << (pin - 32);
- } else {
- offset = GIUPODATH;
- mask = 1 << (pin - 48);
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (value)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-}
-
-
-static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return giu_set_direction(chip, offset, GPIO_INPUT);
-}
-
-static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- vr41xx_gpio_set(chip, offset, value);
-
- return giu_set_direction(chip, offset, GPIO_OUTPUT);
-}
-
-static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- if (offset >= chip->ngpio)
- return -EINVAL;
-
- return GIU_IRQ_BASE + offset;
-}
-
-static struct gpio_chip vr41xx_gpio_chip = {
- .label = "vr41xx",
- .owner = THIS_MODULE,
- .direction_input = vr41xx_gpio_direction_input,
- .get = vr41xx_gpio_get,
- .direction_output = vr41xx_gpio_direction_output,
- .set = vr41xx_gpio_set,
- .to_irq = vr41xx_gpio_to_irq,
-};
-
-static int giu_probe(struct platform_device *pdev)
-{
- unsigned int trigger, i, pin;
- struct irq_chip *chip;
- int irq;
-
- switch (pdev->id) {
- case GPIO_50PINS_PULLUPDOWN:
- giu_flags = GPIO_HAS_PULLUPDOWN_IO;
- vr41xx_gpio_chip.ngpio = 50;
- break;
- case GPIO_36PINS:
- vr41xx_gpio_chip.ngpio = 36;
- break;
- case GPIO_48PINS_EDGE_SELECT:
- giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
- vr41xx_gpio_chip.ngpio = 48;
- break;
- default:
- dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
- return -ENODEV;
- }
-
- giu_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(giu_base))
- return PTR_ERR(giu_base);
-
- vr41xx_gpio_chip.parent = &pdev->dev;
-
- if (gpiochip_add_data(&vr41xx_gpio_chip, NULL))
- return -ENODEV;
-
- giu_write(GIUINTENL, 0);
- giu_write(GIUINTENH, 0);
-
- trigger = giu_read(GIUINTTYPH) << 16;
- trigger |= giu_read(GIUINTTYPL);
- for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
- pin = GPIO_PIN_OF_IRQ(i);
- if (pin < GIUINT_HIGH_OFFSET)
- chip = &giuint_low_irq_chip;
- else
- chip = &giuint_high_irq_chip;
-
- if (trigger & (1 << pin))
- irq_set_chip_and_handler(i, chip, handle_edge_irq);
- else
- irq_set_chip_and_handler(i, chip, handle_level_irq);
-
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0 || irq >= nr_irqs)
- return -EBUSY;
-
- return cascade_irq(irq, giu_get_irq);
-}
-
-static int giu_remove(struct platform_device *pdev)
-{
- if (giu_base) {
- giu_base = NULL;
- }
-
- return 0;
-}
-
-static struct platform_driver giu_device_driver = {
- .probe = giu_probe,
- .remove = giu_remove,
- .driver = {
- .name = "GIU",
- },
-};
-
-module_platform_driver(giu_device_driver);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 5078631d8014..b098f2dc196b 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -4,7 +4,6 @@
* Copyright (C) 2016 William Breathitt Gray
*/
#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -17,8 +16,9 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
-#define WS16C48_EXTENT 16
+#define WS16C48_EXTENT 10
#define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)
static unsigned int base[MAX_NUM_WS16C48];
@@ -31,6 +31,20 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
/**
+ * struct ws16c48_reg - device register structure
+ * @port: Port 0 through 5 I/O
+ * @int_pending: Interrupt Pending
+ * @page_lock: Register page (Bits 7-6) and I/O port lock (Bits 5-0)
+ * @pol_enab_int_id: Interrupt polarity, enable, and ID
+ */
+struct ws16c48_reg {
+ u8 port[6];
+ u8 int_pending;
+ u8 page_lock;
+ u8 pol_enab_int_id[3];
+};
+
+/**
* struct ws16c48_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @io_state: bit I/O state (whether bit is set to input or output)
@@ -38,7 +52,7 @@ MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
* @flow_mask: IRQ flow type mask for the respective I/O bits
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
*/
struct ws16c48_gpio {
struct gpio_chip chip;
@@ -47,7 +61,7 @@ struct ws16c48_gpio {
raw_spinlock_t lock;
unsigned long irq_mask;
unsigned long flow_mask;
- void __iomem *base;
+ struct ws16c48_reg __iomem *reg;
};
static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -73,7 +87,7 @@ static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ws16c48gpio->io_state[port] |= mask;
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -95,7 +109,7 @@ static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
ws16c48gpio->out_state[port] |= mask;
else
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -118,7 +132,7 @@ static int ws16c48_gpio_get(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- port_state = ioread8(ws16c48gpio->base + port);
+ port_state = ioread8(ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -131,14 +145,16 @@ static int ws16c48_gpio_get_multiple(struct gpio_chip *chip,
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
unsigned long offset;
unsigned long gpio_mask;
- void __iomem *port_addr;
+ size_t index;
+ u8 __iomem *port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
- port_addr = ws16c48gpio->base + offset / 8;
+ index = offset / 8;
+ port_addr = ws16c48gpio->reg->port + index;
port_state = ioread8(port_addr) & gpio_mask;
bitmap_set_value8(bits, port_state, offset);
@@ -166,7 +182,7 @@ static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ws16c48gpio->out_state[port] |= mask;
else
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -178,13 +194,13 @@ static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
unsigned long offset;
unsigned long gpio_mask;
size_t index;
- void __iomem *port_addr;
+ u8 __iomem *port_addr;
unsigned long bitmask;
unsigned long flags;
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
index = offset / 8;
- port_addr = ws16c48gpio->base + index;
+ port_addr = ws16c48gpio->reg->port + index;
/* mask out GPIO configured for input */
gpio_mask &= ~ws16c48gpio->io_state[index];
@@ -219,10 +235,15 @@ static void ws16c48_irq_ack(struct irq_data *data)
port_state = ws16c48gpio->irq_mask >> (8*port);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(port_state & ~mask, ws16c48gpio->base + 8 + port);
- iowrite8(port_state | mask, ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+
+ /* Clear pending interrupt */
+ iowrite8(port_state & ~mask, ws16c48gpio->reg->pol_enab_int_id + port);
+ iowrite8(port_state | mask, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -235,6 +256,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -243,10 +265,16 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
+ port_state = ws16c48gpio->irq_mask >> (8 * port);
+
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Disable interrupt */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -259,6 +287,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -267,10 +296,16 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask |= mask;
+ port_state = ws16c48gpio->irq_mask >> (8 * port);
+
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Enable interrupt */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -283,6 +318,7 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -304,9 +340,16 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return -EINVAL;
}
- iowrite8(0x40, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->flow_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ port_state = ws16c48gpio->flow_mask >> (8 * port);
+
+ /* Select Register Page 1; Unlock all I/O ports */
+ iowrite8(0x40, &ws16c48gpio->reg->page_lock);
+
+ /* Set interrupt polarity */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -325,25 +368,26 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
{
struct ws16c48_gpio *const ws16c48gpio = dev_id;
struct gpio_chip *const chip = &ws16c48gpio->chip;
+ struct ws16c48_reg __iomem *const reg = ws16c48gpio->reg;
unsigned long int_pending;
unsigned long port;
unsigned long int_id;
unsigned long gpio;
- int_pending = ioread8(ws16c48gpio->base + 6) & 0x7;
+ int_pending = ioread8(&reg->int_pending) & 0x7;
if (!int_pending)
return IRQ_NONE;
/* loop until all pending interrupts are handled */
do {
for_each_set_bit(port, &int_pending, 3) {
- int_id = ioread8(ws16c48gpio->base + 8 + port);
+ int_id = ioread8(reg->pol_enab_int_id + port);
for_each_set_bit(gpio, &int_id, 8)
generic_handle_domain_irq(chip->irq.domain,
gpio + 8*port);
}
- int_pending = ioread8(ws16c48gpio->base + 6) & 0x7;
+ int_pending = ioread8(&reg->int_pending) & 0x7;
} while (int_pending);
return IRQ_HANDLED;
@@ -369,12 +413,16 @@ static int ws16c48_irq_init_hw(struct gpio_chip *gc)
{
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(gc);
- /* Disable IRQ by default */
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(0, ws16c48gpio->base + 8);
- iowrite8(0, ws16c48gpio->base + 9);
- iowrite8(0, ws16c48gpio->base + 10);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+
+ /* Disable interrupts for all lines */
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[0]);
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[1]);
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[2]);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
return 0;
}
@@ -396,8 +444,8 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- ws16c48gpio->base = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
- if (!ws16c48gpio->base)
+ ws16c48gpio->reg = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
+ if (!ws16c48gpio->reg)
return -ENOMEM;
ws16c48gpio->chip.label = name;
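
The ws16c48 changes above swap raw base + magic-offset I/O (base + 6, base + 7, base + 8 + port) for a named register-map struct. A minimal, compilable sketch of the technique; the field layout is assumed from the offsets used in the patch, not taken from the driver:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* assumed layout: field offsets mirror the old base+N arithmetic */
struct ws16c48_reg_sketch {
	uint8_t port[6];            /* 0x0-0x5: I/O port data */
	uint8_t int_pending;        /* 0x6: interrupt pending (was base + 6) */
	uint8_t page_lock;          /* 0x7: page select/lock (was base + 7) */
	uint8_t pol_enab_int_id[3]; /* 0x8-0xA: per-page regs (was base + 8 + port) */
};

int main(void)
{
	/* named fields encode the same offsets the magic numbers did */
	assert(offsetof(struct ws16c48_reg_sketch, int_pending) == 6);
	assert(offsetof(struct ws16c48_reg_sketch, page_lock) == 7);
	assert(offsetof(struct ws16c48_reg_sketch, pol_enab_int_id) == 8);
	return 0;
}

In the driver itself the struct is declared __iomem and accessed through ioread8()/iowrite8(), so the compiler encodes the offsets instead of the reviewer checking them.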
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 43ca52fa6f9a..fd88500399c6 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -281,11 +281,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
static int iproc_gpio_remove(struct platform_device *pdev)
{
- struct iproc_gpio_chip *chip;
-
- chip = platform_get_drvdata(pdev);
- if (!chip)
- return -ENODEV;
+ struct iproc_gpio_chip *chip = platform_get_drvdata(pdev);
if (chip->intr) {
u32 val;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 7f8e2fed2988..2fc6b6ff7f16 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -117,12 +117,14 @@ static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+
xgpio_set_value32(a, bit, xgpio_readreg(addr));
}
static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+
xgpio_writereg(addr, xgpio_get_value32(a, bit));
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c2523ac26fac..9be1376f9a62 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -687,6 +687,9 @@ int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
case ACPI_PIN_CONFIG_PULLDOWN:
*lookupflags |= GPIO_PULL_DOWN;
break;
+ case ACPI_PIN_CONFIG_NOPULL:
+ *lookupflags |= GPIO_PULL_DISABLE;
+ break;
default:
break;
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index b26e64338376..f8041d4898d1 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -434,12 +434,15 @@ struct line {
struct linereq *req;
unsigned int irq;
/*
- * eflags is set by edge_detector_setup(), edge_detector_stop() and
- * edge_detector_update(), which are themselves mutually exclusive,
- * and is accessed by edge_irq_thread() and debounce_work_func(),
- * which can both live with a slightly stale value.
+ * The flags for the active edge detector configuration.
+ *
+ * edflags is set by linereq_create(), linereq_free(), and
+ * linereq_set_config_unlocked(), which are themselves mutually
+ * exclusive, and is accessed by edge_irq_thread(),
+ * process_hw_ts_thread() and debounce_work_func(),
+ * which can all live with a slightly stale value.
*/
- u64 eflags;
+ u64 edflags;
/*
* timestamp_ns and req_seqno are accessed only by
* edge_irq_handler() and edge_irq_thread(), which are themselves
@@ -469,9 +472,7 @@ struct line {
* stale value.
*/
unsigned int level;
- /*
- * -- hte specific fields --
- */
+#ifdef CONFIG_HTE
struct hte_ts_desc hdesc;
/*
* HTE provider sets line level at the time of event. The valid
@@ -488,6 +489,7 @@ struct line {
* last sequence number before debounce period expires.
*/
u32 last_seqno;
+#endif /* CONFIG_HTE */
};
/**
@@ -545,6 +547,12 @@ struct linereq {
GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_BIAS_FLAGS)
+/* subset of flags relevant for edge detector configuration */
+#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
+ (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
+ GPIO_V2_LINE_EDGE_FLAGS)
+
static void linereq_put_event(struct linereq *lr,
struct gpio_v2_line_event *le)
{
@@ -567,19 +575,28 @@ static u64 line_event_timestamp(struct line *line)
{
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ else if (IS_ENABLED(CONFIG_HTE) &&
+ test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
}
+static u32 line_event_id(int level)
+{
+ return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
+ GPIO_V2_LINE_EVENT_FALLING_EDGE;
+}
+
+#ifdef CONFIG_HTE
+
static enum hte_return process_hw_ts_thread(void *p)
{
struct line *line;
struct linereq *lr;
struct gpio_v2_line_event le;
+ u64 edflags;
int level;
- u64 eflags;
if (!p)
return HTE_CB_HANDLED;
@@ -590,29 +607,26 @@ static enum hte_return process_hw_ts_thread(void *p)
memset(&le, 0, sizeof(le));
le.timestamp_ns = line->timestamp_ns;
- eflags = READ_ONCE(line->eflags);
+ edflags = READ_ONCE(line->edflags);
- if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
- if (line->raw_level >= 0) {
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
- level = !line->raw_level;
- else
- level = line->raw_level;
- } else {
- level = gpiod_get_value_cansleep(line->desc);
- }
+ switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
+ case GPIO_V2_LINE_FLAG_EDGE_BOTH:
+ level = (line->raw_level >= 0) ?
+ line->raw_level :
+ gpiod_get_raw_value_cansleep(line->desc);
- if (level)
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
- /* Emit low-to-high event */
+ if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
+ level = !level;
+
+ le.id = line_event_id(level);
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
- /* Emit high-to-low event */
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else {
+ break;
+ default:
return HTE_CB_HANDLED;
}
le.line_seqno = line->line_seqno;
@@ -659,12 +673,47 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
return HTE_CB_HANDLED;
}
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ int ret;
+ unsigned long flags = 0;
+ struct hte_ts_desc *hdesc = &line->hdesc;
+
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_FALLING_EDGE_TS :
+ HTE_RISING_EDGE_TS;
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_RISING_EDGE_TS :
+ HTE_FALLING_EDGE_TS;
+
+ line->total_discard_seq = 0;
+
+ hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
+ line->desc);
+
+ ret = hte_ts_get(NULL, hdesc, 0);
+ if (ret)
+ return ret;
+
+ return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
+ line);
+}
+
+#else
+
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ return 0;
+}
+#endif /* CONFIG_HTE */
+
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
struct linereq *lr = line->req;
struct gpio_v2_line_event le;
- u64 eflags;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
@@ -683,23 +732,17 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
}
line->timestamp_ns = 0;
- eflags = READ_ONCE(line->eflags);
- if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
- int level = gpiod_get_value_cansleep(line->desc);
-
- if (level)
- /* Emit low-to-high event */
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- /* Emit high-to-low event */
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
- /* Emit low-to-high event */
+ switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
+ case GPIO_V2_LINE_FLAG_EDGE_BOTH:
+ le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
- /* Emit high-to-low event */
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else {
+ break;
+ default:
return IRQ_NONE;
}
line->line_seqno++;
@@ -764,16 +807,16 @@ static void debounce_work_func(struct work_struct *work)
struct gpio_v2_line_event le;
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
- int level, diff_seqno;
- u64 eflags;
+ u64 eflags, edflags = READ_ONCE(line->edflags);
+ int level = -1;
+#ifdef CONFIG_HTE
+ int diff_seqno;
- if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+ if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
level = line->raw_level;
- if (level < 0)
- level = gpiod_get_raw_value_cansleep(line->desc);
- } else {
+#endif
+ if (level < 0)
level = gpiod_get_raw_value_cansleep(line->desc);
- }
if (level < 0) {
pr_debug_ratelimited("debouncer failed to read line value\n");
return;
@@ -785,12 +828,12 @@ static void debounce_work_func(struct work_struct *work)
WRITE_ONCE(line->level, level);
/* -- edge detection -- */
- eflags = READ_ONCE(line->eflags);
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (!eflags)
return;
/* switch from physical level to logical - if they differ */
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
level = !level;
/* ignore edges that are not being monitored */
@@ -804,7 +847,8 @@ static void debounce_work_func(struct work_struct *work)
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
- if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+#ifdef CONFIG_HTE
+ if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
/* discard events except the last one */
line->total_discard_seq -= 1;
diff_seqno = line->last_seqno - line->total_discard_seq -
@@ -813,51 +857,21 @@ static void debounce_work_func(struct work_struct *work)
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
- } else {
+ } else
+#endif /* CONFIG_HTE */
+ {
line->line_seqno++;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_inc_return(&lr->seqno);
}
- if (level)
- /* Emit low-to-high event */
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- /* Emit high-to-low event */
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ le.id = line_event_id(level);
linereq_put_event(lr, &le);
}
-static int hte_edge_setup(struct line *line, u64 eflags)
-{
- int ret;
- unsigned long flags = 0;
- struct hte_ts_desc *hdesc = &line->hdesc;
-
- if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
- HTE_FALLING_EDGE_TS : HTE_RISING_EDGE_TS;
- if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
- HTE_RISING_EDGE_TS : HTE_FALLING_EDGE_TS;
-
- line->total_discard_seq = 0;
-
- hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags,
- NULL, line->desc);
-
- ret = hte_ts_get(NULL, hdesc, 0);
- if (ret)
- return ret;
-
- return hte_request_ts_ns(hdesc, process_hw_ts,
- process_hw_ts_thread, line);
-}
-
-static int debounce_setup(struct line *line,
- unsigned int debounce_period_us, bool hte_req)
+static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
@@ -877,7 +891,8 @@ static int debounce_setup(struct line *line,
if (level < 0)
return level;
- if (!hte_req) {
+ if (!(IS_ENABLED(CONFIG_HTE) &&
+ test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -889,9 +904,7 @@ static int debounce_setup(struct line *line,
return ret;
line->irq = irq;
} else {
- ret = hte_edge_setup(line,
- GPIO_V2_LINE_FLAG_EDGE_RISING |
- GPIO_V2_LINE_FLAG_EDGE_FALLING);
+ ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
if (ret)
return ret;
}
@@ -930,19 +943,21 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
return 0;
}
-static void edge_detector_stop(struct line *line, bool hte_en)
+static void edge_detector_stop(struct line *line)
{
- if (line->irq && !hte_en) {
+ if (line->irq) {
free_irq(line->irq, line);
line->irq = 0;
}
- if (hte_en)
+#ifdef CONFIG_HTE
+ if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
hte_ts_put(&line->hdesc);
+#endif
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
- WRITE_ONCE(line->eflags, 0);
+ WRITE_ONCE(line->edflags, 0);
if (line->desc)
WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
@@ -950,23 +965,23 @@ static void edge_detector_stop(struct line *line, bool hte_en)
static int edge_detector_setup(struct line *line,
struct gpio_v2_line_config *lc,
- unsigned int line_idx,
- u64 eflags, bool hte_req)
+ unsigned int line_idx, u64 edflags)
{
u32 debounce_period_us;
unsigned long irqflags = 0;
+ u64 eflags;
int irq, ret;
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
ret = kfifo_alloc(&line->req->events,
line->req->event_buffer_size, GFP_KERNEL);
if (ret)
return ret;
}
- WRITE_ONCE(line->eflags, eflags);
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
- ret = debounce_setup(line, debounce_period_us, hte_req);
+ ret = debounce_setup(line, debounce_period_us);
if (ret)
return ret;
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
@@ -976,8 +991,9 @@ static int edge_detector_setup(struct line *line,
if (!eflags || READ_ONCE(line->sw_debounced))
return 0;
- if (hte_req)
- return hte_edge_setup(line, eflags);
+ if (IS_ENABLED(CONFIG_HTE) &&
+ (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return hte_edge_setup(line, edflags);
irq = gpiod_to_irq(line->desc);
if (irq < 0)
@@ -1003,35 +1019,29 @@ static int edge_detector_setup(struct line *line,
static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
- unsigned int line_idx,
- u64 flags, bool polarity_change,
- bool prev_hte_flag)
+ unsigned int line_idx, u64 edflags)
{
- u64 eflags = flags & GPIO_V2_LINE_EDGE_FLAGS;
+ u64 active_edflags = READ_ONCE(line->edflags);
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
- bool hte_change = (prev_hte_flag !=
- ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) != 0));
- if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
- (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)
- && !hte_change)
+ if ((active_edflags == edflags) &&
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be... */
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- WRITE_ONCE(line->eflags, eflags);
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
return 0;
}
/* reconfiguring edge detection or sw debounce being disabled */
- if ((line->irq && !READ_ONCE(line->sw_debounced)) || prev_hte_flag ||
+ if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
+ (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
(!debounce_period_us && READ_ONCE(line->sw_debounced)))
- edge_detector_stop(line, prev_hte_flag);
+ edge_detector_stop(line);
- return edge_detector_setup(line, lc, line_idx, eflags,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ return edge_detector_setup(line, lc, line_idx, edflags);
}
static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
@@ -1067,6 +1077,11 @@ static int gpio_v2_line_flags_validate(u64 flags)
/* Return an error if an unknown flag is set */
if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
return -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_HTE) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return -EOPNOTSUPP;
+
/*
* Do not allow both INPUT and OUTPUT flags to be set as they are
* contradictory.
@@ -1076,7 +1091,8 @@ static int gpio_v2_line_flags_validate(u64 flags)
return -EINVAL;
/* Only allow one event clock source */
- if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
+ if (IS_ENABLED(CONFIG_HTE) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
(flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
return -EINVAL;
@@ -1300,22 +1316,17 @@ static long linereq_set_config_unlocked(struct linereq *lr,
struct gpio_v2_line_config *lc)
{
struct gpio_desc *desc;
+ struct line *line;
unsigned int i;
- u64 flags;
- bool polarity_change;
- bool prev_hte_flag;
+ u64 flags, edflags;
int ret;
for (i = 0; i < lr->num_lines; i++) {
+ line = &lr->lines[i];
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(lc, i);
- polarity_change =
- (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
- ((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
-
- prev_hte_flag = !!test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags);
-
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -1323,7 +1334,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
- edge_detector_stop(&lr->lines[i], prev_hte_flag);
+ edge_detector_stop(line);
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
@@ -1332,12 +1343,13 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (ret)
return ret;
- ret = edge_detector_update(&lr->lines[i], lc, i,
- flags, polarity_change, prev_hte_flag);
+ ret = edge_detector_update(line, lc, i, edflags);
if (ret)
return ret;
}
+ WRITE_ONCE(line->edflags, edflags);
+
blocking_notifier_call_chain(&desc->gdev->notifier,
GPIO_V2_LINE_CHANGED_CONFIG,
desc);
@@ -1464,15 +1476,12 @@ static ssize_t linereq_read(struct file *file,
static void linereq_free(struct linereq *lr)
{
unsigned int i;
- bool hte = false;
for (i = 0; i < lr->num_lines; i++) {
- if (lr->lines[i].desc)
- hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
- &lr->lines[i].desc->flags);
- edge_detector_stop(&lr->lines[i], hte);
- if (lr->lines[i].desc)
+ if (lr->lines[i].desc) {
+ edge_detector_stop(&lr->lines[i]);
gpiod_free(lr->lines[i].desc);
+ }
}
kfifo_free(&lr->events);
kfree(lr->label);
@@ -1506,7 +1515,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
struct gpio_v2_line_config *lc;
struct linereq *lr;
struct file *file;
- u64 flags;
+ u64 flags, edflags;
unsigned int i;
int fd, ret;
@@ -1580,6 +1589,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (ret < 0)
goto out_free_linereq;
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -1596,12 +1606,13 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
ret = edge_detector_setup(&lr->lines[i], lc, i,
- flags & GPIO_V2_LINE_EDGE_FLAGS,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ edflags);
if (ret)
goto out_free_linereq;
}
+ lr->lines[i].edflags = edflags;
+
blocking_notifier_call_chain(&desc->gdev->notifier,
GPIO_V2_LINE_CHANGED_REQUESTED, desc);
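
The gpiolib-cdev rework above folds the edge flags, ACTIVE_LOW, and the HTE event-clock bit into a single edflags word, so each handler can take one READ_ONCE() snapshot and switch on it instead of testing several independently updated fields. A compilable userspace sketch of that dispatch shape, with made-up flag values standing in for the GPIO_V2_LINE_FLAG_* constants:

#include <stdint.h>
#include <stdio.h>

#define FLAG_ACTIVE_LOW   (1ULL << 0)
#define FLAG_EDGE_RISING  (1ULL << 1)
#define FLAG_EDGE_FALLING (1ULL << 2)
#define EDGE_FLAGS        (FLAG_EDGE_RISING | FLAG_EDGE_FALLING)

static const char *event_for(uint64_t edflags, int level)
{
	/* one snapshot of the config word decides the whole event */
	switch (edflags & EDGE_FLAGS) {
	case FLAG_EDGE_RISING | FLAG_EDGE_FALLING:
		if (edflags & FLAG_ACTIVE_LOW)
			level = !level;
		return level ? "rising" : "falling";
	case FLAG_EDGE_RISING:
		return "rising";
	case FLAG_EDGE_FALLING:
		return "falling";
	default:
		return "none";
	}
}

int main(void)
{
	/* active-low line seen high -> logical falling edge */
	printf("%s\n", event_for(EDGE_FLAGS | FLAG_ACTIVE_LOW, 1));
	return 0;
}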
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 79da85d17b71..16a696249229 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -375,9 +375,6 @@ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
-
-
-
static void devm_gpio_release(struct device *dev, void *res)
{
unsigned *gpio = res;
@@ -385,13 +382,6 @@ static void devm_gpio_release(struct device *dev, void *res)
gpio_free(*gpio);
}
-static int devm_gpio_match(struct device *dev, void *res, void *data)
-{
- unsigned *this = res, *gpio = data;
-
- return *this == *gpio;
-}
-
/**
* devm_gpio_request - request a GPIO for a managed device
* @dev: device to request the GPIO for
@@ -402,11 +392,7 @@ static int devm_gpio_match(struct device *dev, void *res, void *data)
* same arguments and performs the same function as
* gpio_request(). GPIOs requested with this function will be
* automatically freed on driver detach.
- *
- * If an GPIO allocated with this function needs to be freed
- * separately, devm_gpio_free() must be used.
*/
-
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
{
unsigned *dr;
@@ -459,24 +445,6 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
}
EXPORT_SYMBOL_GPL(devm_gpio_request_one);
-/**
- * devm_gpio_free - free a GPIO
- * @dev: device to free GPIO for
- * @gpio: GPIO to free
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as gpio_free().
- * This function instead of gpio_free() should be used to manually
- * free GPIOs allocated with devm_gpio_request().
- */
-void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
-
- WARN_ON(devres_release(dev, devm_gpio_release, devm_gpio_match,
- &gpio));
-}
-EXPORT_SYMBOL_GPL(devm_gpio_free);
-
static void devm_gpio_chip_release(void *data)
{
struct gpio_chip *gc = data;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 3d6c3ffd5576..a037b50bef33 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -354,6 +354,9 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
if (flags & OF_GPIO_PULL_DOWN)
lflags |= GPIO_PULL_DOWN;
+ if (flags & OF_GPIO_PULL_DISABLE)
+ lflags |= GPIO_PULL_DISABLE;
+
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
@@ -556,6 +559,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_PULL_UP;
if (of_flags & OF_GPIO_PULL_DOWN)
*flags |= GPIO_PULL_DOWN;
+ if (of_flags & OF_GPIO_PULL_DISABLE)
+ *flags |= GPIO_PULL_DISABLE;
return desc;
}
@@ -621,6 +626,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*lflags |= GPIO_PULL_UP;
if (xlate_flags & OF_GPIO_PULL_DOWN)
*lflags |= GPIO_PULL_DOWN;
+ if (xlate_flags & OF_GPIO_PULL_DISABLE)
+ *lflags |= GPIO_PULL_DISABLE;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
@@ -720,7 +727,7 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
{
- return chip->gpiodev->dev.of_node == data;
+ return device_match_of_node(&chip->gpiodev->dev, data);
}
static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
@@ -860,7 +867,8 @@ int of_mm_gpiochip_add_data(struct device_node *np,
if (mm_gc->save_regs)
mm_gc->save_regs(mm_gc);
- mm_gc->gc.of_node = np;
+ of_node_put(mm_gc->gc.of_node);
+ mm_gc->gc.of_node = of_node_get(np);
ret = gpiochip_add_data(gc, data);
if (ret)
@@ -868,6 +876,7 @@ int of_mm_gpiochip_add_data(struct device_node *np,
return 0;
err2:
+ of_node_put(np);
iounmap(mm_gc->regs);
err1:
kfree(gc->label);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 68d9f95d7799..cc9c0a12259e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3942,9 +3942,11 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) {
+ if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) ||
+ ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) ||
+ ((lflags & GPIO_PULL_DOWN) && (lflags & GPIO_PULL_DISABLE))) {
gpiod_err(desc,
- "both pull-up and pull-down enabled, invalid configuration\n");
+ "multiple pull-up, pull-down or pull-disable enabled, invalid configuration\n");
return -EINVAL;
}
@@ -3952,6 +3954,8 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
set_bit(FLAG_PULL_UP, &desc->flags);
else if (lflags & GPIO_PULL_DOWN)
set_bit(FLAG_PULL_DOWN, &desc->flags);
+ else if (lflags & GPIO_PULL_DISABLE)
+ set_bit(FLAG_BIAS_DISABLE, &desc->flags);
ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
if (ret < 0)
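
The gpiod_configure_flags() change above rejects any combination of two bias flags with three pairwise tests. Since the three bits must be mutually exclusive, the same check can be phrased as "at most one bit set"; a compilable sketch of that alternative, with illustrative flag values rather than the real <linux/gpio/machine.h> ones:

#include <stdio.h>

#define GPIO_PULL_UP      (1 << 0)
#define GPIO_PULL_DOWN    (1 << 1)
#define GPIO_PULL_DISABLE (1 << 2)

static int bias_flags_valid(unsigned long lflags)
{
	unsigned long bias = lflags &
		(GPIO_PULL_UP | GPIO_PULL_DOWN | GPIO_PULL_DISABLE);

	/* bias & (bias - 1) is nonzero exactly when two or more bits
	 * are set, so this accepts zero or one bias flag */
	return (bias & (bias - 1)) == 0;
}

int main(void)
{
	printf("%d\n", bias_flags_valid(GPIO_PULL_UP));                  /* 1 */
	printf("%d\n", bias_flags_valid(GPIO_PULL_UP | GPIO_PULL_DOWN)); /* 0 */
	return 0;
}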
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index c6cc493a5486..2b97b8a96fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r = 0;
dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -339,10 +331,13 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r;
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
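
Both aldebaran handlers above stop rebuilding the device list themselves and instead consume a list the top-level reset path stores in the reset context (the amdgpu_device.c hunk further down sets reset_context->reset_device_list before invoking the handler). A toy sketch of that contract, with stand-in types replacing the amdgpu structures:

#include <errno.h>
#include <stdio.h>

struct node { struct node *next; const char *name; };
struct reset_context { struct node *reset_device_list; };

static int perform_reset(struct reset_context *ctx)
{
	/* handlers no longer build the list; they only validate and walk it */
	if (!ctx->reset_device_list)
		return -EINVAL;
	for (struct node *n = ctx->reset_device_list; n; n = n->next)
		printf("resetting %s\n", n->name);
	return 0;
}

int main(void)
{
	struct node b = { NULL, "gpu1" }, a = { &b, "gpu0" };
	struct reset_context ctx = { &a };
	return perform_reset(&ctx);
}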
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e146810c700b..d597e2656c47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -317,7 +317,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3c09dcc0986e..647220a8762d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a699134a1e8c..cbd593f7d553 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -40,10 +40,10 @@
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
- * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
/* Impose limit on how much memory KFD can use */
static struct {
@@ -149,7 +149,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
- vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ vram_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -182,8 +182,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
- if (adev)
+ if (adev) {
adev->kfd.vram_used += vram_needed;
+ adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ }
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
@@ -203,8 +205,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
- if (adev)
- adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ if (adev) {
+ adev->kfd.vram_used -= size;
+ adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
@@ -1608,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;
-
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- - adev->kfd.vram_used
+ - adev->kfd.vram_used_aligned
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
- return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+ return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
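
The memory-limit hunks above split KFD's VRAM accounting in two: vram_used tracks exact bytes so unreserve stays symmetric with reserve, while vram_used_aligned rounds every reservation up to the 2 MiB availability granularity so amdgpu_amdkfd_get_available_memory() reports a conservative figure. A small compilable sketch of the two counters:

#include <stdint.h>
#include <stdio.h>

#define AVAIL_ALIGN (1ull << 21) /* 2 MiB, as in the patch */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

static uint64_t vram_used;          /* exact bytes, symmetric with free */
static uint64_t vram_used_aligned;  /* pessimistic, drives availability */

static void reserve(uint64_t size)
{
	vram_used += size;
	vram_used_aligned += ALIGN_UP(size, AVAIL_ALIGN);
}

static void unreserve(uint64_t size)
{
	vram_used -= size;
	vram_used_aligned -= ALIGN_UP(size, AVAIL_ALIGN);
}

int main(void)
{
	reserve(4096); /* a 4 KiB allocation consumes a whole 2 MiB chunk */
	printf("exact=%llu aligned=%llu\n",
	       (unsigned long long)vram_used,
	       (unsigned long long)vram_used_aligned); /* 4096 vs 2097152 */
	unreserve(4096);
	return 0;
}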
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fd8f3731758e..b81b77a9efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
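
The atomfirmware fix above implies that the v3.0 vram_info table encodes channel_width as a power-of-two exponent, so the total bus width is channels * 2^width rather than channels * width. A sketch of the corrected arithmetic; the example values are illustrative, not from a real table:

#include <stdio.h>

int main(void)
{
	unsigned int channels = 8;    /* mem_channel_number */
	unsigned int width_log2 = 4;  /* mem_channel_width: 2^4 = 16-bit channels */

	/* old: channels * width_log2 = 32 (wrong); new: 8 * 16 = 128 bits */
	printf("vram_width = %u bits\n", channels * (1u << width_log2));
	return 0;
}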
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d8f1335bc68f..b7bae833c804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -837,16 +837,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
}
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e2eec985adb3..cb00c7d6f50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1705,7 +1705,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
- uint32_t *new, *tmp = NULL;
+ uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;
do {
@@ -1747,7 +1747,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
ret = size;
error_free:
- kfree(tmp);
+ if (tmp != new)
+ kfree(tmp);
kfree(new);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a6fe3070b6..f095a2513aff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2456,12 +2456,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!hive->reset_domain ||
!amdgpu_reset_get_reset_domain(hive->reset_domain)) {
r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
goto init_failed;
}
/* Drop the early temporary reset domain we created for device */
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = hive->reset_domain;
+ amdgpu_put_xgmi_hive(hive);
}
}
@@ -4413,8 +4415,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
- amdgpu_amdkfd_pre_reset(adev);
-
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -4742,6 +4742,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);
+
+ reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5071b96be982..b1099ee79c50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -272,10 +272,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
-
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index b067ce45d226..1036446abc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2641,6 +2641,9 @@ static int psp_hw_fini(void *handle)
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
}
psp_asd_terminate(psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 9e55a5d7a825..ffda1560c648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -37,6 +37,7 @@ struct amdgpu_reset_context {
struct amdgpu_device *reset_req_dev;
struct amdgpu_job *job;
struct amdgpu_hive_info *hive;
+ struct list_head *reset_device_list;
unsigned long flags;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b4c19412625..134575a3893c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -637,6 +637,8 @@ struct amdgpu_ttm_tt {
#endif
};
+#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
+
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -648,7 +650,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct ttm_tt *ttm = bo->tbo.ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -702,7 +704,7 @@ out_unlock:
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;
if (!gtt || !gtt->userptr)
@@ -751,7 +753,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -788,7 +790,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -822,7 +824,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -860,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
uint64_t flags;
int r;
@@ -927,7 +929,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -998,7 +1000,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1025,7 +1027,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1079,7 +1081,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
pgoff_t i;
int ret;
@@ -1113,7 +1115,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
pgoff_t i;
@@ -1182,7 +1184,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
- gtt = (void *)bo->ttm;
+ gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1199,7 +1201,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return NULL;
@@ -1218,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end, unsigned long *userptr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long size;
if (gtt == NULL || !gtt->userptr)
@@ -1241,7 +1243,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
*/
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL || !gtt->userptr)
return false;
@@ -1254,7 +1256,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return false;
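
The amdgpu_ttm.c hunks above replace every (void *)ttm cast with a ttm_to_amdgpu_ttm_tt() helper built on container_of(), which recovers the wrapper struct from a pointer to its embedded member instead of silently assuming the member sits at offset zero. A self-contained sketch with simplified stand-in types and a local copy of the macro:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_tt { int page_flags; };

struct amdgpu_ttm_tt_sketch {
	unsigned long userptr;
	struct ttm_tt ttm;   /* the embedded member need not be first */
};

int main(void)
{
	struct amdgpu_ttm_tt_sketch gtt = { .userptr = 42 };
	struct ttm_tt *base = &gtt.ttm;

	/* a plain (void *) cast would misread this layout; container_of
	 * subtracts the member offset and recovers the wrapper safely */
	struct amdgpu_ttm_tt_sketch *back =
		container_of(base, struct amdgpu_ttm_tt_sketch, ttm);
	printf("%lu\n", back->userptr); /* 42 */
	return 0;
}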
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 108e8e8a1a36..576849e95296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1b108d03e785..f2aebbf3fbe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -742,7 +742,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_put_xgmi_hive(hive);
}
- return psp_xgmi_terminate(&adev->psp);
+ return 0;
}
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index 33a8a7365aef..f0e235f98afb 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"
+#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+
+
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+ break;
+ default:
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ }
+ return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+ break;
+ default:
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ }
+}
+
static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
static void
@@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 0, 2):
athub_v3_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
- data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ data = athub_v3_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9c964cd3b5d4..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e0ad9f27dc3f..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 77f5e998a120..b1c44fab074f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 802e5c753271..a22b45c92792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fafbad3cf08d..a2a4dc1844c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4846,7 +4846,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 6fd71cb10e54..f6b1bb40e503 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -53,6 +53,7 @@
#define GFX11_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388
#define regCGTT_WD_CLK_CTRL 0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
@@ -130,6 +131,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
+ bool enable);
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -1138,6 +1141,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
+ .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5181,9 +5185,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
} else {
/* Program RLC_CGCG_CGLS_CTRL */
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5212,9 +5219,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
}
}
@@ -5279,6 +5289,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};
+static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
+
+ /* Program RLC_PG_DELAY_3 for CGPG hysteresis */
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 1):
+ WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ gfx_v11_cntl_power_gating(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
static int gfx_v11_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -5293,6 +5335,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
case IP_VERSION(11, 0, 2):
amdgpu_gfx_off_ctrl(adev, enable);
break;
+ case IP_VERSION(11, 0, 1):
+ gfx_v11_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
default:
break;
}
@@ -5310,6 +5356,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c6e0f9313a7f..fc9c1043244c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9ae8cdaa033e..f513e2c2e964 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 22761a3bb818..4603653916f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_domain->sem);
@@ -1624,12 +1625,15 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size same with Vega20 */
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 39a696cd45b5..29c3484ae1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
0);
}
+static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Before switching the clock/power mode, force the MEM clock on */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* disable clock and power gating before making any changes */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+		/* ATOMIC/RC_MEM_POWER_CTRL_EN must be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+	/* disable the MEM clock override after the clock/power mode change */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_2_update_mem_power_gating(adev, enable);
+ hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
+}
+
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
.flush_hdp = hdp_v5_2_flush_hdp,
+ .update_clock_gating = hdp_v5_2_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 92dc60a9d209..085e613f3646 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -727,6 +727,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
.get_wptr = ih_v6_0_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v6_0_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3f44a099c52a..3e51e773f92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index cac72ced94c8..e8058edc1d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- //TODO
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
}
const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 6e0145b2b408..445cb06b9d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned int num_level, block_size;
uint32_t tmp;
int i;
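+	/* Adjust page-table depth and block size when further translation is enabled */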
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4b5396d3e60f..eec13cb5bf75 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+	 * The ih_soft ring has no backing hardware registers;
+	 * just update the wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
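+	/* The soft IH ring has no hardware rptr register to program */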
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 01e8288d09a8..1dc95ef21da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -247,6 +247,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
}
+static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (enable) {
+ data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ } else {
+ data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
+}
+
+static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (enable)
+ data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+ else
+ data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
+ if (enable) {
+ data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
+}
+
+static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -262,6 +337,9 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index a2588200ea58..0b2ac418e4ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
adev->psp.dtm_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR) {
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 726a5bba40b2..a75a286e1ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/dev_printk.h>
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 52816de5e17b..55284b24f113 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -494,6 +494,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
+static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
+ bool enter)
+{
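+	/* Hold RLC safe mode and keep perfmon MGCG disabled while the UMD stable pstate is active */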
+ if (enter)
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ else
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (adev->gfx.funcs->update_perfmon_mgcg)
+ adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
+
+ return 0;
+}
+
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
@@ -513,6 +527,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
+ .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
static int soc21_common_early_init(void *handle)
@@ -546,8 +561,10 @@ static int soc21_common_early_init(void *handle)
case IP_VERSION(11, 0, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
+#if 0
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
+#endif
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_FGCG |
@@ -575,7 +592,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_ATHUB_MGCG |
- AMD_CG_SUPPORT_ATHUB_LS;
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags =
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
@@ -586,9 +605,25 @@ static int soc21_common_early_init(void *handle)
break;
case IP_VERSION(11, 0, 1):
adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
+ AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
@@ -683,6 +718,8 @@ static int soc21_common_set_clockgating_state(void *handle,
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ case IP_VERSION(7, 7, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index ca14c3ef742e..fb2d74f30448 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1115,7 +1115,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t tmp;
@@ -1133,7 +1133,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
- return 0;
}
/**
@@ -1154,7 +1153,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_stop_dpg_mode(adev, i);
+ vcn_v4_0_stop_dpg_mode(adev, i);
continue;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cdd599a08125..03b7066471f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -334,9 +334,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+	 * The ih_soft ring has no backing hardware registers;
+	 * just update the wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -409,6 +411,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 3b4eb8285943..2022ffbb8dba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -385,9 +385,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+	 * The ih_soft ring has no backing hardware registers;
+	 * just update the wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -461,6 +463,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 2b3d8bc8f0aa..dc774ddf3445 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -874,7 +874,7 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
err = kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &args->wait_result);
+ &args->timeout, &args->wait_result);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f5853835f03a..22c0929d410b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -102,13 +102,18 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
- case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
break;
+ case IP_VERSION(6, 0, 1):
+ /* Reserve 1 for paging and 1 for gfx */
+ kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
+ /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
+ kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ break;
default:
break;
}
@@ -377,12 +382,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
- gfx_target_version = 100306;
- if (!vf)
- f2g = &gfx_v10_3_kfd2kgd;
- break;
case IP_VERSION(10, 3, 7):
- gfx_target_version = 100307;
+ gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3942a56c28bb..83e3ce9f6049 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
return msecs_to_jiffies(user_timeout_ms) + 1;
}
-static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
+ bool undo_auto_reset)
{
uint32_t i;
@@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
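+		/* Re-arm auto-reset events that already fired so a restarted wait can consume them again */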
+ if (undo_auto_reset && waiters[i].activated &&
+ waiters[i].event && waiters[i].event->auto_reset)
+ set_event(waiters[i].event);
spin_unlock(&waiters[i].event->lock);
}
@@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result)
{
struct kfd_event_data __user *events =
@@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
int ret = 0;
struct kfd_event_waiter *event_waiters = NULL;
- long timeout = user_timeout_to_jiffies(user_timeout_ms);
+ long timeout = user_timeout_to_jiffies(*user_timeout_ms);
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
@@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
}
if (signal_pending(current)) {
- /*
- * This is wrong when a nonzero, non-infinite timeout
- * is specified. We need to use
- * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
- * contains a union with data for each user and it's
- * in generic kernel code that I don't want to
- * touch yet.
- */
ret = -ERESTARTSYS;
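+			/* Report the remaining timeout so the restarted wait doesn't start over */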
+ if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
+ *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
+ *user_timeout_ms = jiffies_to_msecs(
+ max(0l, timeout-1));
break;
}
@@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
event_waiters, events);
out_unlock:
- free_waiters(num_events, event_waiters);
+ free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
mutex_unlock(&p->event_mutex);
out:
if (ret)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 373e5bfd4e91..b059a77b6081 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -685,13 +685,15 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
- migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
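+	/* VRAM coherently attached to the CPU (XGMI) can migrate as device-coherent pages */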
+ if (adev->gmc.xgmi.connected_to_cpu)
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+ else
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
-
if (!buf)
goto out;
@@ -974,7 +976,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
{
struct kfd_dev *kfddev = adev->kfd.dev;
struct dev_pagemap *pgmap;
- struct resource *res;
+ struct resource *res = NULL;
unsigned long size;
void *r;
@@ -989,28 +991,34 @@ int svm_migrate_init(struct amdgpu_device *adev)
* should remove reserved size
*/
size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
- res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
- if (IS_ERR(res))
- return -ENOMEM;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ pgmap->range.start = adev->gmc.aper_base;
+ pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
+ pgmap->type = MEMORY_DEVICE_COHERENT;
+ } else {
+ res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+ if (IS_ERR(res))
+ return -ENOMEM;
+ pgmap->range.start = res->start;
+ pgmap->range.end = res->end;
+ pgmap->type = MEMORY_DEVICE_PRIVATE;
+ }
- pgmap->type = MEMORY_DEVICE_PRIVATE;
pgmap->nr_range = 1;
- pgmap->range.start = res->start;
- pgmap->range.end = res->end;
pgmap->ops = &svm_migrate_pgmap_ops;
pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
- pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
-
+ pgmap->flags = 0;
/* Device manager releases device-specific resources, memory region and
* pgmap when driver disconnects from device.
*/
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
-
+		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+			devm_release_mem_region(adev->dev, res->start,
+				res->end - res->start + 1);
		/* Disable SVM support capability */
		pgmap->type = 0;
-		devm_release_mem_region(adev->dev, res->start, resource_size(res));
return PTR_ERR(r);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d03a3b9c9c5d..bf610e3b683b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a67ba8879a56..11074cc8c333 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
- svm_bo->svms = prange->svms;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -3273,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
- struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
@@ -3281,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
- /* svm_range_bo_release destroys this worker thread. So during
- * the lifetime of this thread, kfd_process and mm will be valid.
- */
- p = container_of(svm_bo->svms, struct kfd_process, svms);
- mm = p->mm;
- if (!mm)
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ mm = svm_bo->eviction_fence->mm;
+ } else {
+ svm_range_bo_unref(svm_bo);
return;
+ }
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
@@ -3305,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
- r = svm_migrate_vram_to_ram(prange,
- svm_bo->eviction_fence->mm,
+ r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_TTM_EVICTION);
} while (!r && prange->actual_loc && --retries);
@@ -3324,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
}
spin_unlock(&svm_bo->list_lock);
mmap_read_unlock(mm);
+ mmput(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 9156b041ef17..cfac13ad06ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -46,7 +46,6 @@ struct svm_range_bo {
spinlock_t list_lock;
struct amdgpu_amdkfd_fence *eviction_fence;
struct work_struct eviction_work;
- struct svm_range_list *svms;
uint32_t evicting;
struct work_struct release_work;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 25990bec600d..3f0a4a415907 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
+ struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
struct kfd_iolink_properties *props = NULL, *props2 = NULL;
- struct kfd_iolink_properties *gpu_link, *cpu_link;
struct kfd_topology_device *cpu_dev;
int ret = 0;
int i, num_cpu;
@@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
continue;
/* find CPU <--> CPU links */
+ cpu_link = NULL;
cpu_dev = kfd_topology_device_by_proximity_domain(i);
if (cpu_dev) {
- list_for_each_entry(cpu_link,
+ list_for_each_entry(tmp_link,
&cpu_dev->io_link_props, list) {
- if (cpu_link->node_to == gpu_link->node_to)
+ if (tmp_link->node_to == gpu_link->node_to) {
+ cpu_link = tmp_link;
break;
+ }
}
}
- if (cpu_link->node_to != gpu_link->node_to)
+ if (!cpu_link)
return -ENOMEM;
/* CPU <--> CPU <--> GPU, GPU node*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8660d93cc405..5140d9c2bf3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4135,6 +4138,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
/*
* In this architecture, the association
@@ -4326,6 +4330,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
+ amdgpu_set_panel_orientation(&aconnector->base);
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -6684,6 +6689,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
return;
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!encoder)
return;
@@ -6728,8 +6737,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
-
- amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b841b8b0a9d8..987bde4dca3d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -34,6 +34,7 @@
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
+#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}
-bool modifier_has_dcc(uint64_t modifier)
+static bool modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -660,7 +661,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
}
@@ -1412,7 +1413,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
}
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 95168c2cfa6f..286981a2dd40 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-void get_min_max_dc_plane_scaling(struct drm_device *dev,
- struct drm_framebuffer *fb,
- int *min_downscale, int *max_upscale);
-
int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-bool modifier_has_dcc(uint64_t modifier);
-
-unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);
-
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 6767fab55c26..352e9afb85c6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -100,3 +100,24 @@ void convert_float_matrix(
matrix[i] = (uint16_t)reg_value;
}
}
+
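+/* Greatest common divisor via Euclid's algorithm */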
+static uint32_t find_gcd(uint32_t a, uint32_t b)
+{
+ uint32_t remainder = 0;
+ while (b != 0) {
+ remainder = a % b;
+ a = b;
+ b = remainder;
+ }
+ return a;
+}
+
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den)
+{
+ uint32_t gcd = 0;
+
+ gcd = find_gcd(num, den);
+ *out_num = num / gcd;
+ *out_den = den / gcd;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index ade785c4fdc7..81da4e6f7a1a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -38,6 +38,9 @@ void convert_float_matrix(
struct fixed31_32 *flt,
uint32_t buffer_size);
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c76091fd1f2..f276abb63bcd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
- case AMDGPU_FAMILY_GC_11_0_2: {
+ case AMDGPU_FAMILY_GC_11_0_1: {
struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
@@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
dcn32_clk_mgr_destroy(clk_mgr);
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dcn314_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0202dc682682..ca6dfd2d7561 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -24,10 +24,9 @@
*/
#include "dccg.h"
-#include "clk_mgr_internal.h"
+#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
-#include "rn_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index 2e088c5171b2..f1319957e400 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -28,6 +28,7 @@
#include "clk_mgr.h"
#include "dm_pp_smu.h"
+#include "clk_mgr_internal.h"
extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index ee99974b3b62..beb025cd3dc2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -307,16 +307,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn314_smu_enable_pme_wa(clk_mgr);
}
-void dcn314_init_clocks(struct clk_mgr *clk_mgr)
-{
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
-
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
@@ -425,7 +415,7 @@ static struct wm_table lpddr5_wm_table = {
}
};
-static DpmClocks_t dummy_clocks;
+static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
@@ -510,7 +500,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
- DpmClocks_t *table = smu_dpm_clks->dpm_clks;
+ DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
@@ -527,6 +517,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
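+/* PMFW clock values are in MHz; anything outside (1, 100000) is treated as unpopulated */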
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
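+/* Map the SMU WCK_RATIO enum to its memclk ratio; unknown values default to 1:1 */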
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+
+ default:
+ break;
+ }
+ return 1;
+}
+
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -540,89 +550,127 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
-static unsigned int find_clk_for_voltage(
- const DpmClocks_t *clock_table,
- const uint32_t clocks[],
- unsigned int voltage)
-{
- int i;
- int max_voltage = 0;
- int clock = 0;
-
- for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage) {
- return clocks[i];
- } else if (clock_table->SocVoltage[i] >= max_voltage &&
- clock_table->SocVoltage[i] < voltage) {
- max_voltage = clock_table->SocVoltage[i];
- clock = clocks[i];
- }
- }
-
- ASSERT(clock);
- return clock;
-}
-
static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+ const DpmClocks314_t *clock_table)
{
- int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ int i;
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ /* Find highest valid fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
+ clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
-
- bw_params->clk_table.num_entries = j + 1;
+ /* We expect the table to contain at least one valid fclk entry. */
+ ASSERT(is_valid_clock_value(max_fclk));
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ /* Dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
+ /* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
- switch (clock_table->DfPstateTable[j].WckRatio) {
- case WCK_RATIO_1_2:
- bw_params->clk_table.entries[i].wck_ratio = 2;
- break;
- case WCK_RATIO_1_4:
- bw_params->clk_table.entries[i].wck_ratio = 4;
- break;
- default:
- bw_params->clk_table.entries[i].wck_ratio = 1;
+	/* Base the clock table on dcfclk; we need at least one entry regardless of the PMFW table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
+ int j;
+
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
+ clock_table->DfPstateTable[j].FClk < min_fclk &&
+ clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
}
- bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
+
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[min_pstate].WckRatio);
+	}
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[max_pstate].WckRatio);
+ i++;
}
+ bw_params->clk_table.num_entries = i--;
+
+	/* Make sure all of the highest clocks are included */
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ /*
+	 * Set any zero clocks to the max default setting. Not an issue
+	 * for power, since we aren't doing any switching in that case anyway.
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
- bw_params->num_channels = bios_info->ma_channel_number;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -641,7 +689,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn314_init_clocks,
+ .init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
@@ -681,10 +729,10 @@ void dcn314_clk_mgr_construct(
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
- smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
+ smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
- sizeof(DpmClocks_t),
+ sizeof(DpmClocks314_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
@@ -729,7 +777,7 @@ void dcn314_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index c695a4498c50..171f84340eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
-void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index a7958dc96581..047d19ea919c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -36,6 +36,37 @@ typedef enum {
WCK_RATIO_MAX
} WCK_RATIO_e;
+typedef struct {
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
struct dcn314_watermarks {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
};
struct dcn314_smu_dpm_clks {
- DpmClocks_t *dpm_clks;
+ DpmClocks314_t *dpm_clks;
union large_integer mc_address;
};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e42f44fc1c08..aeecca68dea7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1074,8 +1074,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
- bool pipe_split_change =
- context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -3229,7 +3236,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3254,6 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
top_pipe_to_program->stream_res.tg);
}
- }
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -3466,7 +3472,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3499,19 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
- }
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
- /* Since phantom pipe programming is moved to post_unlock_program_front_end,
- * move the SubVP lock to after the phantom pipes have been setup
- */
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- }
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4292,7 +4296,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
@@ -4340,6 +4344,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
struct dc_context *dc_ctx = dc->ctx;
dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e51338441d0..66d2ae7aacf5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3372,7 +3372,7 @@ bool dc_link_setup_psr(struct dc_link *link,
switch(link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
if(!dc->debug.disable_z10)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ffc0f1c0ea93..7dbab15bfa68 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_21;
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dc_version = DCN_VERSION_3_14;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8e1e40083ec8..5908b60db313 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.196"
+#define DC_VER "3.2.198"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -213,6 +213,7 @@ struct dc_caps {
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
+ uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
@@ -352,6 +353,7 @@ struct dc_config {
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
+ bool use_default_clock_table;
};
enum visual_confirm {
@@ -609,6 +611,7 @@ struct dc_bounding_box_overrides {
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int dummy_clock_change_latency_ns;
+ int fclk_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -751,6 +754,7 @@ struct dc_debug_options {
uint32_t mst_start_top_delay;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
+ bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2d61c2a91cee..09b304507bad 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
+#include "../basics/conversion.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -275,8 +276,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
union dmub_rb_cmd cmd = { 0 };
cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- // TODO: Uncomment once FW headers are promoted
- //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
+ cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
@@ -601,6 +601,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ uint32_t out_num, out_den;
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -612,6 +613,16 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
+
+ /* Calculate the scaling factor from the src and dst height.
+ * e.g. if 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
+ * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
+ */
+ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, &out_num, &out_den);
+ // TODO: Uncomment the lines below once DMCUB include headers are promoted
+ //pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+ //pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
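+ /* Worked example (illustrative): for the 3840x2160 -> 1920x1080 case
+ * above, reduce_fraction(2160, 1080, &out_num, &out_den) divides both
+ * terms by gcd(2160, 1080) = 1080, leaving the reduced pair (2, 1);
+ * read as dst/src this is the 1/2 downscale ratio quoted above. */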
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index a0af0f6afeef..9544abf75e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -344,6 +344,7 @@ enum dc_detect_reason {
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 213de8cabfad..165392380842 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -543,9 +543,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
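Illustrative arithmetic for the rounding added above, on a hypothetical 10 bpc clock (all values in 100 Hz units):

	uint32_t clk = 1234567;   /* 123.4567 MHz */
	clk = (clk * 5) >> 2;     /* x1.25 for 10 bpc -> 1543208 */
	clk -= clk % 10;          /* snap to a 1 kHz grid -> 1543200 */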
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d4a6504dfe00..db7ca4b0cdb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -361,8 +361,6 @@ void dpp1_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index b54c12400323..564e061ccb58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -278,9 +278,6 @@ void hubp1_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index bed783747f16..5b5d952b2b8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -110,6 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 769974375b4b..8e9384094f6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -131,6 +131,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
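A minimal standalone sketch of the guard this hunk (and its dcn20 twin below) adds; the node/next names are hypothetical, and note that only self-referential links are caught, not longer cycles:

	struct node { int id; struct node *next; };

	static struct node *find(struct node *n, int id)
	{
		while (n) {
			if (n->id == id)
				return n;
			if (n == n->next) /* self-loop: list is corrupt, bail out */
				break;
			n = n->next;
		}
		return NULL;
	}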
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e1a9a45b03b6..3fc300cd1ce9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -465,6 +465,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
// Last chance to clear underflow; otherwise it will stay set because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
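/* Ordering note (illustrative): once OTG_CLOCK_EN is dropped just below,
 * the underflow status can no longer be cleared, so the check must come
 * before the clock gate. */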
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index ea1f14af0db7..eaa7032f0f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -166,8 +166,6 @@ static void dpp2_cnv_setup (
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 936af65381ef..9570c2118ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -463,9 +463,6 @@ void hubp2_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 3d307dd58e9a..116f67a0b989 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -531,6 +531,12 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index c5e200d09038..5752271f22df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,9 +67,15 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active;
+ uint32_t riommu_active, prefetch_done;
int i;
+ REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
+
+ if (prefetch_done) {
+ hubbub->riommu_active = true;
+ return;
+ }
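+	/* Assumption, stated for clarity: HOSTVM_PREFETCH_DONE reads back as
+	 * set only after a previous dcn21_dchvm_init() run completed, so this
+	 * early return skips a redundant re-init rather than real work. */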
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 77b00f86c216..4a668d6563df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -244,8 +244,6 @@ void dpp3_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 6a4dcafb9bba..dc3e8df706b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 0a67f8a5656d..d97076648acb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -372,7 +372,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGE) {
+ if (eng_id <= ENGINE_ID_DIGB) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
index 7c77c71591a0..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -162,7 +162,8 @@
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
- SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 468a893ff785..aedff18aff56 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2153,7 +2153,7 @@ static bool dcn31_resource_construct(
pool->base.usb4_dpia_count = 4;
}
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2)
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
pool->base.usb4_dpia_count = 4;
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 41f8ec99da6b..901436591ed4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn31_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
struct dcn31_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index e3b5a95e03b1..702c28c2560e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -13,31 +13,6 @@
DCN314 = dcn314_resource.o dcn314_hwseq.o dcn314_init.o \
dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 755c715ad8dc..39931d48f385 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,7 +343,10 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ bool two_pix_per_container = false;
+ two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (is_dp_128b_132b_signal(pipe_ctx)) {
@@ -355,16 +358,13 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
else
*k2_div = PIXEL_RATE_DIV_BY_4;
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
- } else if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- *k1_div = PIXEL_RATE_DIV_BY_2;
- *k2_div = PIXEL_RATE_DIV_BY_2;
} else {
- if (odm_combine_factor == 1)
- *k2_div = PIXEL_RATE_DIV_BY_4;
- else if (odm_combine_factor == 2)
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+ if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -374,3 +374,31 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
return odm_combine_factor;
}
+
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t pix_per_cycle = 1;
+ uint32_t odm_combine_factor = 1;
+
+ if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
+ || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ pix_per_cycle = 2;
+
+ if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
+ pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ pix_per_cycle);
+}
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+ return false;
+}
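Read together with dcn314_calculate_dccg_k1_k2_values() above, the intended mapping for one hypothetical DP case (a sketch, not a spec):

	/* 4k YCbCr 4:2:0 over DP: two pixels share one container, so     */
	/* two_pix_per_container = true -> *k1_div = PIXEL_RATE_DIV_BY_1, */
	/* *k2_div = PIXEL_RATE_DIV_BY_2, and dcn314_set_pixels_per_cycle()*/
	/* programs the stream encoder input mode to 2 pixels per cycle.  */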
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index be0f5e4d48e1..d014580592ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -39,4 +39,8 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index b9debeb081fd..fcf67eb3478f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -145,6 +145,8 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
+ .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 63861cdfb09f..3a9e3870b3a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -70,6 +70,7 @@
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn314/dcn314_fpu.h"
#include "dcn314/dcn314_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -132,155 +133,6 @@ static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C
#define DC_LOGGER_INIT(logger)
-#define DCN3_14_DEFAULT_DET_SIZE 384
-#define DCN3_14_MAX_DET_SIZE 384
-#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
-#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 668,
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1402,7 +1254,7 @@ static struct stream_encoder *dcn314_stream_encoder_create(
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGF) {
+ if (eng_id < ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
@@ -1447,7 +1299,8 @@ static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
- vpg_inst = hpo_dp_inst + 6;
+ // Uses offset index 5-8, but actually maps to vpg_inst 6-9
+ vpg_inst = hpo_dp_inst + 5;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
@@ -1793,109 +1646,16 @@ static struct clock_source *dcn31_clock_source_create(
return NULL;
}
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
- bool upscaled = false;
-
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-
- if (pipe->plane_state &&
- (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
- pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
- upscaled = true;
-
- /*
- * Immediate flip can be set dynamically after enabling the plane.
- * We need to require support for immediate flip or underflow can be
- * intermittently experienced depending on peak b/w requirements.
- */
- pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
- pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.src.dcc_rate = 3;
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
-
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
-
- pipe_cnt++;
- }
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
-
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
- /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
- && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
- } else if (context->stream_count >= 3 && upscaled) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->stream)
- continue;
+ int pipe_cnt;
- if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
- pipe->stream->apply_seamless_boot_optimization) {
-
- if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
- context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
- break;
- }
- }
- }
+ DC_FP_START();
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ DC_FP_END();
return pipe_cnt;
}
@@ -1906,88 +1666,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st *clock_tmp = dcn3_14_soc._clock_tmp;
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
-
- if (bw_params->num_channels > 0)
- dcn3_14_soc.num_chans = bw_params->num_channels;
-
- ASSERT(dcn3_14_soc.num_chans);
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_14_soc.num_states - 1;
- }
-
- clock_tmp[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_tmp[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_tmp[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-
- if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
- clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_tmp[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_tmp[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_tmp[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_tmp[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_tmp[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_14_soc.clock_limits[i] = clock_tmp[i];
- if (clk_table->num_entries)
- dcn3_14_soc.num_states = clk_table->num_entries;
- }
-
- if (max_dispclk_mhz) {
- dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+ DC_FP_START();
+ dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
}
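The DC_FP_START()/DC_FP_END() bracket above follows the DML convention of guarding FPU use at the call site while the callee only asserts the context; a minimal sketch of the pairing, assuming x86 where the macros wrap kernel_fpu_begin()/kernel_fpu_end():

	void update_bw(struct dc *dc, struct clk_bw_params *bw_params)
	{
		DC_FP_START();	/* enter kernel FPU context */
		dcn314_update_bw_bounding_box_fpu(dc, bw_params);	/* calls dc_assert_fp_enabled() */
		DC_FP_END();	/* leave kernel FPU context */
	}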
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -2069,6 +1750,7 @@ static bool dcn314_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index c41108847ce0..0dd3153aa5c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -29,6 +29,9 @@
#include "core_types.h"
+extern struct _vcs_dpi_ip_params_st dcn3_14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc;
+
#define TO_DCN314_RES_POOL(pool)\
container_of(pool, struct dcn314_resource_pool, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index 39929fa67a51..22849eaa6f24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn315_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_15_soc;
struct dcn315_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 0dc5a6c13ae7..aba6d634131b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn316_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_16_soc;
struct dcn316_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index d38341f68b17..ebd3945c71f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -250,6 +250,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
uint32_t num_ways = 0;
+ uint32_t prev_addr_low = 0;
for (i = 0; i < ctx->stream_count; i++) {
stream = ctx->streams[i];
@@ -267,10 +268,20 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
plane = ctx->stream_status[i].plane_states[j];
// Calculate total surface size
- surface_size = plane->plane_size.surface_pitch *
+ if (prev_addr_low != plane->address.grph.addr.u.low_part) {
+ /* if the plane address differs from the prev FB, userspace allocated separate FBs */
+ surface_size += plane->plane_size.surface_pitch *
plane->plane_size.surface_size.height *
(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ prev_addr_low = plane->address.grph.addr.u.low_part;
+ } else {
+ /* We have the same fb for all the planes.
+ * Xorg always creates one giant fb that holds all surfaces,
+ * so allocating it once is sufficient.
+ */
+ continue;
+ }
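+ /* Worked example (illustrative): a hypothetical 3840x2160 ARGB8888 plane
+ * with a 3840-pixel pitch contributes 3840 * 2160 * 4 = 33177600 bytes
+ * before the cache-line conversion below. */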
// Convert surface size + starting address to number of cache lines required
// (alignment accounted for)
cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
@@ -320,7 +331,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- uint8_t ways;
+ uint8_t ways, i;
+ int j;
+ bool stereo_in_use = false;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -349,7 +363,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
* and configure HUBP's to fetch from MALL
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- if (ways <= dc->caps.cache_num_ways) {
+
+ /* MALL not supported with Stereo3D. If any plane is using stereo,
+ * don't try to enter MALL.
+ */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ plane = dc->current_state->stream_status[i].plane_states[j];
+
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
+ stereo_in_use = true;
+ break;
+ }
+ }
+ if (stereo_in_use)
+ break;
+ }
+ if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -683,9 +713,11 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
+ // MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
- pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
cache_cursor);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index eff1f4e17689..1fad7b48bd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -281,7 +281,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr, // TODO: Update to optc32_set_drr once FW headers are promoted
+ .set_drr = optc32_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 9a26d24b579f..8b887b552f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -867,7 +867,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -2051,6 +2051,7 @@ static bool dcn32_resource_construct(
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index b3f8503cea9c..955f52e6064d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -63,7 +63,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;
+ mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
// For bytes required in MALL, calculate based on number of MBlks required
num_mblks = (mall_region_pixels * bytes_per_pixel +
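Why pitch matters in the hunk above, with hypothetical numbers:

	/* 3840 addressable pixels, surface padded to a 4096-pixel pitch */
	uint32_t v_addressable = 2160;
	uint32_t pitch = 4096, h_addressable = 3840;
	uint32_t by_pitch = pitch * v_addressable;          /* 8847360 px */
	uint32_t by_hact  = h_addressable * v_addressable;  /* 8294400 px */
	/* sizing the MALL region by h_addressable would under-allocate by ~6% here */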
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 8157e40d2c7e..c8b7d6ff38f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -868,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -1662,8 +1662,9 @@ static bool dcn321_resource_construct(
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
-
+ dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 359f6e9a1da0..86a3b5bfd699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,7 +61,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
@@ -71,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
@@ -82,7 +82,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
@@ -131,6 +130,7 @@ DML += dcn321/dcn321_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
+DML += dcn314/dcn314_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ca44df4fca74..d34e0f1314d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,6 +30,7 @@
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "clk_mgr/dcn21/rn_clk_mgr.h"
#include "dcn20_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 7ef66e511ec8..d211cf6d234c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -26,6 +26,7 @@
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn301/dcn301_resource.h"
+#include "clk_mgr/dcn301/vg_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn301_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index e36cfa5985ea..149a1b17cdf3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -25,6 +25,9 @@
#include "resource.h"
#include "clk_mgr.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn315/dcn315_resource.h"
+#include "dcn316/dcn316_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn31_fpu.h"
@@ -114,7 +117,7 @@ struct _vcs_dpi_ip_params_st dcn3_1_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -259,7 +262,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
@@ -355,7 +358,7 @@ struct _vcs_dpi_ip_params_st dcn3_16_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 3fab19134480..d63b4209b14c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -26,7 +26,7 @@
#include "dc.h"
#include "dc_link.h"
#include "../display_mode_lib.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
#include "../dml_inline_defs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 66b82e4f05c6..35d10b4d018b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -27,7 +27,7 @@
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_31.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
new file mode 100644
index 000000000000..34a5d0f87b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr.h"
+#include "resource.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn314_fpu.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/display_mode_vba.h"
+
+struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dcn3_14_soc.clock_limits;
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
+
+ dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+
+ if (bw_params->num_channels > 0)
+ dcn3_14_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(dcn3_14_soc.num_chans);
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /* SMU gives one DPM level, let's take the highest one */
+ closest_clk_lvl = dcn3_14_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /* SMU fix not released yet */
+ clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_14_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_14_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+}
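
The voltage-level matching above walks the SoC state table from the top down. A minimal standalone sketch of that search, with a hypothetical state table rather than the real _vcs_dpi_voltage_scaling_st entries:

#include <stdio.h>

struct state { unsigned int dcfclk_mhz; };

/* Return the highest SoC state whose DCFCLK does not exceed the SMU entry;
 * default to state 0 when every state is faster, as the loop above does. */
static unsigned int closest_level(const struct state *soc, int num_states,
				  unsigned int dcfclk_mhz)
{
	int j;

	for (j = num_states - 1; j >= 0; j--)
		if (soc[j].dcfclk_mhz <= dcfclk_mhz)
			return j;
	return 0;
}

int main(void)
{
	const struct state soc[] = { {400}, {600}, {738}, {900}, {1200} };

	/* A hypothetical SMU entry of 800 MHz maps to state 2 (738 MHz). */
	printf("%u\n", closest_level(soc, 5, 800));
	return 0;
}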
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+
+ dc_assert_fp_enabled();
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+ && pipe->stream->adjust.v_total_min > timing->v_total)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We must require support for immediate flip, or underflow can be
+ * experienced intermittently depending on peak bandwidth requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
+
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ /* Limit to 5k max to avoid a forced pipe split when there is not enough detile buffer for the swath */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
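
The DET sizing branches above reduce to a small policy. Below is a simplified, hypothetical helper that folds the constants in and omits the 4-to-1 MPC branch (which does not change the DET size); the real code mutates context->bw_ctx.dml.ip directly.

#include <stdbool.h>

static int det_buffer_size_kbytes(int pipe_cnt, bool single_plane_at_most_5k,
				  int stream_count, bool upscaled,
				  int crb_alloc_policy, int policy_min_disp_count)
{
	if (pipe_cnt == 1 && single_plane_at_most_5k)
		return 192;			/* leave room for unbounded requesting */
	if (stream_count >= policy_min_disp_count && crb_alloc_policy > 0)
		return crb_alloc_policy * 64;	/* 64 KB CRB segments */
	if (stream_count >= 3 && upscaled)
		return 192;
	return 384;				/* DCN3_14_DEFAULT_DET_SIZE */
}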
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
new file mode 100644
index 000000000000..d32c5bb99f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN314_FPU_H__
+#define __DCN314_FPU_H__
+
+#define DCN3_14_DEFAULT_DET_SIZE 384
+#define DCN3_14_MAX_DET_SIZE 384
+#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 66453546e24f..8118cfc5b405 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -473,8 +473,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
// DML calculation for MALL region doesn't take into account FW delay
// and required pstate allow width for multi-display cases
+ /* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
+ * to 2 swaths (i.e. 16 lines)
+ */
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
- pstate_width_fw_delay_lines;
+ pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -490,6 +493,7 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
phantom_stream->timing.v_front_porch +
phantom_stream->timing.v_sync_width +
phantom_bp;
+ phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}
/**
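
The added margin keeps SUB_VP_START_LINE swath-aligned. A worked example with hypothetical DML outputs:

/* Hypothetical numbers: 120 subviewport lines from DML plus a 20-line
 * firmware delay plus the new 16-line margin (2 swaths x 8 lines each). */
static unsigned int phantom_vactive_example(void)
{
	return 120 + 20 + 16;	/* = 156 lines */
}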
@@ -983,9 +987,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
+ * Override present for testing.
*/
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ if (dc->debug.dml_disallow_alternate_prefetch_modes)
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter;
+ else
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
if (*vlevel < context->bw_ctx.dml.soc.num_states)
@@ -1014,7 +1024,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 * will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
* enough to support MCLK switching.
*/
- if (*vlevel == context->bw_ctx.dml.soc.num_states) {
+ if (*vlevel == context->bw_ctx.dml.soc.num_states &&
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
+ dm_prefetch_support_uclk_fclk_and_stutter) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_stutter;
/* There are params (such as FabricClock) that need to be recalculated
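
Taken together, the two hunks above form a fallback ladder: start from the strict prefetch mode only when the debug knob forbids alternates, otherwise let DML relax the mode itself, and step down to stutter-only when the strict mode yields no supported voltage level. A sketch with illustrative names (the real values are the dm_prefetch_support_* enums):

#include <stdbool.h>

enum prefetch_mode {
	UCLK_FCLK_AND_STUTTER,			/* strictest: full p-state support */
	UCLK_FCLK_AND_STUTTER_IF_POSSIBLE,	/* DML may relax on its own */
	STUTTER_ONLY,				/* fallback when strict mode fails */
};

static enum prefetch_mode initial_prefetch_mode(bool disallow_alternates)
{
	return disallow_alternates ? UCLK_FCLK_AND_STUTTER
				   : UCLK_FCLK_AND_STUTTER_IF_POSSIBLE;
}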
@@ -1344,7 +1356,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
@@ -1373,17 +1386,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
DC_FP_END();
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
+ *
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favoring voltage.
*
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, try if
- * supported with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_fclk_and_stutter;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -2098,6 +2116,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_2_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
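
The new fclk hunk repeats a pattern used for every latency override in this function: a non-zero nanosecond override replaces the microsecond SoC default whenever the two disagree. A minimal sketch of the pattern, with illustrative names:

static void apply_latency_override_us(double *soc_latency_us, int override_ns)
{
	if (override_ns && (int)(*soc_latency_us * 1000) != override_ns)
		*soc_latency_us = override_ns / 1000.0;
}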
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 890612db08dc..cb2025771646 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -221,7 +221,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
// VBA_DELTA
// Calculate DET size, swath height
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -461,7 +460,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
v->SurfaceSizeInMALL,
@@ -757,9 +755,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
- v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
- v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+ v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelaySCL,
@@ -1167,7 +1163,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
@@ -1952,7 +1947,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2549,7 +2543,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2749,7 +2742,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
mode_lib->vba.SurfaceSizeInMALL,
@@ -3266,7 +3258,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
mode_lib->vba.DSCDelayPerState[i][k],
@@ -3566,7 +3557,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[i][j],
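
The removals above drop the dummy_vars scratch argument from the dml32 helpers; the next file completes the change by turning each scratch field into a stack local. A toy before/after sketch of the pattern (names simplified):

/* before: scratch lives in a caller-provided, long-lived struct */
struct scratch { int total; };
static void calc_old(struct scratch *st, int n) { st->total = n * 2; }

/* after: scratch is a plain stack local and the extra parameter disappears */
static void calc_new(int n) { int total = n * 2; (void)total; }

The trade-off, presumably, is a larger stack frame (several DC__NUM_DPP__MAX-sized arrays now live on the stack) in exchange for dropping the shared scratch structure.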
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 07f8f3b8626b..05fc14a47fba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -391,7 +391,6 @@ void dml32_CalculateBytePerPixelAndBlockSizes(
} // CalculateBytePerPixelAndBlockSizes
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -456,10 +455,18 @@ void dml32_CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
+ unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpSwathSizeBytesY;
+ unsigned int RoundedUpSwathSizeBytesC;
+ double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
unsigned int k;
-
- st_vars->TotalActiveDPP = 0;
- st_vars->NoChromaSurfaces = true;
+ unsigned int TotalActiveDPP = 0;
+ bool NoChromaSurfaces = true;
+ unsigned int DETBufferSizeInKByteForSwathCalculation;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -494,43 +501,43 @@ void dml32_CalculateSwathAndDETConfiguration(
DPPPerSurface,
/* Output */
- st_vars->SwathWidthdoubleDPP,
- st_vars->SwathWidthdoubleDPPChroma,
+ SwathWidthdoubleDPP,
+ SwathWidthdoubleDPPChroma,
SwathWidth,
SwathWidthChroma,
- st_vars->MaximumSwathHeightY,
- st_vars->MaximumSwathHeightC,
+ MaximumSwathHeightY,
+ MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * st_vars->MaximumSwathHeightY[k];
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * st_vars->MaximumSwathHeightC[k];
+ RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
+ RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, st_vars->MaximumSwathHeightY[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, st_vars->MaximumSwathHeightC[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesC[k]);
+ RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (SourcePixelFormat[k] == dm_420_10) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesY[k], 256);
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesC[k], 256);
+ RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
+ RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalActiveDPP = st_vars->TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
+ TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
- st_vars->NoChromaSurfaces = false;
+ NoChromaSurfaces = false;
}
}
@@ -540,10 +547,10 @@ void dml32_CalculateSwathAndDETConfiguration(
// if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
// - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
 // - assume that for the "narrow" vp case, in which the ROB can fit 8 swaths, the DET should be big enough to do a full-size req
- *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (st_vars->RoundedUpMaxSwathSizeBytesY[0]/512);
+ *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
if (*CompBufReservedSpaceNeedAdjustment == 1) {
- *CompBufReservedSpaceKBytes = ROBSizeKBytes - st_vars->RoundedUpMaxSwathSizeBytesY[0]/512;
+ *CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
}
#ifdef __DML_VBA_DEBUG__
@@ -551,7 +558,7 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, *CompBufReservedSpaceNeedAdjustment);
#endif
- *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, st_vars->TotalActiveDPP, st_vars->NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
+ *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml32_CalculateDETBufferSize(DETSizeOverride,
UseMALLForPStateChange,
@@ -566,8 +573,8 @@ void dml32_CalculateSwathAndDETConfiguration(
SourcePixelFormat,
ReadBandwidthLuma,
ReadBandwidthChroma,
- st_vars->RoundedUpMaxSwathSizeBytesY,
- st_vars->RoundedUpMaxSwathSizeBytesC,
+ RoundedUpMaxSwathSizeBytesY,
+ RoundedUpMaxSwathSizeBytesC,
DPPPerSurface,
/* Output */
@@ -575,7 +582,7 @@ void dml32_CalculateSwathAndDETConfiguration(
CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, st_vars->TotalActiveDPP);
+ dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
@@ -586,42 +593,42 @@ void dml32_CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
+ DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
- st_vars->DETBufferSizeInKByteForSwathCalculation);
+ DETBufferSizeInKByteForSwathCalculation);
#endif
- if (st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] < 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
} else {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
}
- if ((st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 >
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
+ if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
|| SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
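
The else-if chain above picks swath heights by trying full-height luma and chroma swaths first, then halving the dominant plane until the pair fits in half the DET. A standalone sketch of the same rule (illustrative; the real code also records the rounded swath sizes):

static void fit_swaths(unsigned int det_half_bytes,
		       unsigned int y_bytes, unsigned int c_bytes,
		       unsigned int *y_div, unsigned int *c_div)
{
	if (y_bytes + c_bytes <= det_half_bytes) {
		*y_div = 1; *c_div = 1;		/* both full height */
	} else if (y_bytes >= 3 * c_bytes / 2 &&
		   y_bytes / 2 + c_bytes <= det_half_bytes) {
		*y_div = 2; *c_div = 1;		/* halve luma only */
	} else if (y_bytes < 3 * c_bytes / 2 &&
		   y_bytes + c_bytes / 2 <= det_half_bytes) {
		*y_div = 1; *c_div = 2;		/* halve chroma only */
	} else {
		*y_div = 2; *c_div = 2;		/* halve both */
	}
}

(The kernel code compares against 1.5 * RoundedUpMaxSwathSizeBytesC in floating point; the 3 * c_bytes / 2 form here is an integer approximation.)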
@@ -636,7 +643,7 @@ void dml32_CalculateSwathAndDETConfiguration(
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
DETBufferSizeC[k] = 0;
- } else if (st_vars->RoundedUpSwathSizeBytesY <= 1.5 * st_vars->RoundedUpSwathSizeBytesC) {
+ } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
#endif
@@ -654,11 +661,11 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ k, RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesC[k]);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesY);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesC);
+ k, RoundedUpMaxSwathSizeBytesC[k]);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
@@ -1867,7 +1874,6 @@ void dml32_CalculateSurfaceSizeInMall(
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -1933,6 +1939,21 @@ void dml32_CalculateVMRowAndSwath(
unsigned int BIGK_FRAGMENT_SIZE[])
{
unsigned int k;
+ unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
+ unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
+ bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (HostVMEnable == true) {
@@ -1954,15 +1975,15 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
!IsVertical(myPipe[k].SourceRotation)) {
- st_vars->PTEBufferSizeInRequestsForLuma[k] =
+ PTEBufferSizeInRequestsForLuma[k] =
(PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = st_vars->PTEBufferSizeInRequestsForLuma[k];
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
}
- st_vars->PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -1982,21 +2003,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForChroma[k],
+ PTEBufferSizeInRequestsForChroma[k],
myPipe[k].PitchC,
myPipe[k].DCCMetaPitchC,
myPipe[k].BlockWidthC,
myPipe[k].BlockHeightC,
/* Output */
- &st_vars->MetaRowByteC[k],
- &st_vars->PixelPTEBytesPerRowC[k],
+ &MetaRowByteC[k],
+ &PixelPTEBytesPerRowC[k],
&dpte_row_width_chroma_ub[k],
&dpte_row_height_chroma[k],
&dpte_row_height_linear_chroma[k],
- &st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k],
- &st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_chroma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowC_one_row_per_frame[k],
+ &dpte_row_width_chroma_ub_one_row_per_frame[k],
+ &dpte_row_height_chroma_one_row_per_frame[k],
&meta_req_width_chroma[k],
&meta_req_height_chroma[k],
&meta_row_width_chroma[k],
@@ -2024,19 +2045,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillC[k],
&MaxNumSwathC[k]);
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = 0;
- st_vars->PixelPTEBytesPerRowC[k] = 0;
- st_vars->PDEAndMetaPTEBytesFrameC = 0;
- st_vars->MetaRowByteC[k] = 0;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForChroma[k] = 0;
+ PixelPTEBytesPerRowC[k] = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC[k] = 0;
MaxNumSwathC[k] = 0;
PrefetchSourceLinesC[k] = 0;
- st_vars->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
+ dpte_row_height_chroma_one_row_per_frame[k] = 0;
+ dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
+ PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
}
- st_vars->PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -2056,21 +2077,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForLuma[k],
+ PTEBufferSizeInRequestsForLuma[k],
myPipe[k].PitchY,
myPipe[k].DCCMetaPitchY,
myPipe[k].BlockWidthY,
myPipe[k].BlockHeightY,
/* Output */
- &st_vars->MetaRowByteY[k],
- &st_vars->PixelPTEBytesPerRowY[k],
+ &MetaRowByteY[k],
+ &PixelPTEBytesPerRowY[k],
&dpte_row_width_luma_ub[k],
&dpte_row_height_luma[k],
&dpte_row_height_linear_luma[k],
- &st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k],
- &st_vars->dpte_row_width_luma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_luma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowY_one_row_per_frame[k],
+ &dpte_row_width_luma_ub_one_row_per_frame[k],
+ &dpte_row_height_luma_one_row_per_frame[k],
&meta_req_width[k],
&meta_req_height[k],
&meta_row_width[k],
@@ -2098,19 +2119,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillY[k],
&MaxNumSwathY[k]);
- PDEAndMetaPTEBytesFrame[k] = st_vars->PDEAndMetaPTEBytesFrameY + st_vars->PDEAndMetaPTEBytesFrameC;
- MetaRowByte[k] = st_vars->MetaRowByteY[k] + st_vars->MetaRowByteC[k];
+ PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
+ MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
- if (st_vars->PixelPTEBytesPerRowY[k] <= 64 * st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC[k] <= 64 * st_vars->PTEBufferSizeInRequestsForChroma[k]) {
+ if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
PTEBufferSizeNotExceeded[k] = true;
} else {
PTEBufferSizeNotExceeded[k] = false;
}
- st_vars->one_row_per_frame_fits_in_buffer[k] = (st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
- st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * st_vars->PTEBufferSizeInRequestsForChroma[k]);
+ one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
+ PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
}
dml32_CalculateMALLUseForStaticScreen(
@@ -2118,7 +2139,7 @@ void dml32_CalculateVMRowAndSwath(
MALLAllocatedForDCN,
UseMALLForStaticScreen, // mode
SurfaceSizeInMALL,
- st_vars->one_row_per_frame_fits_in_buffer,
+ one_row_per_frame_fits_in_buffer,
/* Output */
 UsesMALLForStaticScreen); // boolean
@@ -2144,13 +2165,13 @@ void dml32_CalculateVMRowAndSwath(
!(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
if (use_one_row_for_frame[k]) {
- dpte_row_height_luma[k] = st_vars->dpte_row_height_luma_one_row_per_frame[k];
- dpte_row_width_luma_ub[k] = st_vars->dpte_row_width_luma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowY[k] = st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k];
- dpte_row_height_chroma[k] = st_vars->dpte_row_height_chroma_one_row_per_frame[k];
- dpte_row_width_chroma_ub[k] = st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowC[k] = st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k];
- PTEBufferSizeNotExceeded[k] = st_vars->one_row_per_frame_fits_in_buffer[k];
+ dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
+ dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
+ dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
+ dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
+ PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
}
if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
@@ -2158,7 +2179,7 @@ void dml32_CalculateVMRowAndSwath(
else
DCCMetaBufferSizeNotExceeded[k] = false;
- PixelPTEBytesPerRow[k] = st_vars->PixelPTEBytesPerRowY[k] + st_vars->PixelPTEBytesPerRowC[k];
+ PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
if (use_one_row_for_frame[k])
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
@@ -2169,11 +2190,11 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].VRatioChroma,
myPipe[k].DCCEnable,
myPipe[k].HTotal / myPipe[k].PixelClock,
- st_vars->MetaRowByteY[k], st_vars->MetaRowByteC[k],
+ MetaRowByteY[k], MetaRowByteC[k],
meta_row_height[k],
meta_row_height_chroma[k],
- st_vars->PixelPTEBytesPerRowY[k],
- st_vars->PixelPTEBytesPerRowC[k],
+ PixelPTEBytesPerRowY[k],
+ PixelPTEBytesPerRowC[k],
dpte_row_height_luma[k],
dpte_row_height_chroma[k],
@@ -2189,12 +2210,12 @@ void dml32_CalculateVMRowAndSwath(
dml_print("DML::%s: k=%d, dpte_row_height_luma = %d\n", __func__, k, dpte_row_height_luma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowY[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, PixelPTEBytesPerRowY[k]);
dml_print("DML::%s: k=%d, dpte_row_height_chroma = %d\n",
__func__, k, dpte_row_height_chroma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowC[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, PixelPTEBytesPerRowC[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRow = %d\n", __func__, k, PixelPTEBytesPerRow[k]);
dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded = %d\n",
__func__, k, PTEBufferSizeNotExceeded[k]);
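
The PTEBufferSizeNotExceeded checks woven through this function follow one rule: each PTE request covers 64 bytes of row data, and one-row-per-frame mode may use the doubled (luma plus chroma) buffer. A hedged sketch of the per-plane check:

#include <stdbool.h>

/* Illustrative helper; mirrors PixelPTEBytesPerRowY/C <= 64 * buffer size. */
static bool pte_row_fits(unsigned int bytes_per_row_y,
			 unsigned int bytes_per_row_c,
			 unsigned int reqs_luma, unsigned int reqs_chroma)
{
	return bytes_per_row_y <= 64 * reqs_luma &&
	       bytes_per_row_c <= 64 * reqs_chroma;
}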
@@ -3342,7 +3363,6 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -3406,18 +3426,45 @@ bool dml32_CalculatePrefetchSchedule(
double *VReadyOffsetPix)
{
bool MyError = false;
-
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
- st_vars->max_vratio_pre = __DML_MAX_VRATIO_PRE__;
- st_vars->Tsw_est1 = 0;
- st_vars->Tsw_est3 = 0;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler;
+ double LineTime;
+ double dst_y_prefetch_equ;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tvm_oto_lines;
+ double Tr0_oto_lines;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double trip_to_mem;
+ double Tvm_trips;
+ double Tr0_trips;
+ double Tvm_trips_rounded;
+ double Tr0_trips_rounded;
+ double Lsw_oto;
+ double Tpre_rounded;
+ double prefetch_bw_equ;
+ double Tvm_equ;
+ double Tr0_equ;
+ double Tdmbf;
+ double Tdmec;
+ double Tdmsks;
+ double prefetch_sw_bytes;
+ double bytes_pp;
+ double dep_bytes;
+ unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ double min_Lsw;
+ double Tsw_est1 = 0;
+ double Tsw_est3 = 0;
if (GPUVMEnable == true && HostVMEnable == true)
- st_vars->HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
else
- st_vars->HostVMDynamicLevelsTrips = 0;
+ HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
@@ -3440,19 +3487,19 @@ bool dml32_CalculatePrefetchSchedule(
TSetup,
/* output */
- &st_vars->Tdmbf,
- &st_vars->Tdmec,
- &st_vars->Tdmsks,
+ &Tdmbf,
+ &Tdmec,
+ &Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
- st_vars->LineTime = myPipe->HTotal / myPipe->PixelClock;
- st_vars->trip_to_mem = UrgentLatency;
- st_vars->Tvm_trips = UrgentExtraLatency + st_vars->trip_to_mem * (GPUVMPageTableLevels * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ LineTime = myPipe->HTotal / myPipe->PixelClock;
+ trip_to_mem = UrgentLatency;
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (DynamicMetadataVMEnabled == true)
- *Tdmdl = TWait + st_vars->Tvm_trips + st_vars->trip_to_mem;
+ *Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
@@ -3462,15 +3509,15 @@ bool dml32_CalculatePrefetchSchedule(
#endif
if (DynamicMetadataEnable == true) {
- if (VStartup * st_vars->LineTime < *TSetup + *Tdmdl + st_vars->Tdmbf + st_vars->Tdmec + st_vars->Tdmsks) {
+ if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
- __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
- __func__, st_vars->Tdmsks);
+ __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
__func__, *Tdmdl);
#endif
@@ -3482,21 +3529,21 @@ bool dml32_CalculatePrefetchSchedule(
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + st_vars->Tvm_trips : 0);
+ GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
- st_vars->DPPCycles = st_vars->DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
- st_vars->DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
- *DSTXAfterScaler = st_vars->DPPCycles * myPipe->PixelClock / myPipe->Dppclk + st_vars->DISPCLKCycles *
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
@@ -3506,10 +3553,10 @@ bool dml32_CalculatePrefetchSchedule(
+ ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DPPCycles: %d\n", __func__, st_vars->DPPCycles);
+ dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
- dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, st_vars->DISPCLKCycles);
+ dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->Dispclk);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: ODMMode: %d\n", __func__, myPipe->ODMMode);
@@ -3522,9 +3569,9 @@ bool dml32_CalculatePrefetchSchedule(
else
*DSTYAfterScaler = 0;
- st_vars->DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(st_vars->DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = st_vars->DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
@@ -3532,132 +3579,132 @@ bool dml32_CalculatePrefetchSchedule(
MyError = false;
- st_vars->Tr0_trips = st_vars->trip_to_mem * (st_vars->HostVMDynamicLevelsTrips + 1);
+ Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
if (GPUVMEnable == true) {
- st_vars->Tvm_trips_rounded = dml_ceil(4.0 * st_vars->Tvm_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + st_vars->trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ *Tno_bw = UrgentExtraLatency + trip_to_mem *
+ (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
} else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / st_vars->LineTime, 1.0) /
- 4.0 * st_vars->LineTime; // VBA_ERROR
+ Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
+ 4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
} else {
*Tno_bw = 0;
}
} else if (myPipe->DCCEnable == true) {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
*Tno_bw = 0;
} else {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = st_vars->LineTime / 2.0;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = LineTime / 2.0;
*Tno_bw = 0;
}
- st_vars->Tvm_trips_rounded = dml_max(st_vars->Tvm_trips_rounded, st_vars->LineTime / 4.0);
- st_vars->Tr0_trips_rounded = dml_max(st_vars->Tr0_trips_rounded, st_vars->LineTime / 4.0);
+ Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
+ Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
|| myPipe->SourcePixelFormat == dm_420_12) {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
} else {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
}
- st_vars->prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- st_vars->prefetch_bw_oto = dml_max(st_vars->bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
- st_vars->prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * st_vars->LineTime));
+ prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
+ prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
- st_vars->min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / st_vars->max_vratio_pre;
- st_vars->min_Lsw = dml_max(st_vars->min_Lsw, 1.0);
- st_vars->Lsw_oto = dml_ceil(4.0 * dml_max(st_vars->prefetch_sw_bytes / st_vars->prefetch_bw_oto / st_vars->LineTime, st_vars->min_Lsw), 1.0) / 4.0;
+ min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+ min_Lsw = dml_max(min_Lsw, 1.0);
+ Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
if (GPUVMEnable == true) {
- st_vars->Tvm_oto = dml_max3(
- st_vars->Tvm_trips,
- *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / st_vars->prefetch_bw_oto,
- st_vars->LineTime / 4.0);
+ Tvm_oto = dml_max3(
+ Tvm_trips,
+ *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ LineTime / 4.0);
} else
- st_vars->Tvm_oto = st_vars->LineTime / 4.0;
+ Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_oto = dml_max4(
- st_vars->Tr0_trips,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto,
- (st_vars->LineTime - st_vars->Tvm_oto)/2.0,
- st_vars->LineTime / 4.0);
+ Tr0_oto = dml_max4(
+ Tr0_trips,
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ (LineTime - Tvm_oto)/2.0,
+ LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, st_vars->LineTime - st_vars->Tvm_oto);
- dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, st_vars->LineTime / 4);
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
+ dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
#endif
} else
- st_vars->Tr0_oto = (st_vars->LineTime - st_vars->Tvm_oto) / 2.0;
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
- st_vars->Tvm_oto_lines = dml_ceil(4.0 * st_vars->Tvm_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->Tr0_oto_lines = dml_ceil(4.0 * st_vars->Tr0_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->dst_y_prefetch_oto = st_vars->Tvm_oto_lines + 2 * st_vars->Tr0_oto_lines + st_vars->Lsw_oto;
+ Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
- st_vars->dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / st_vars->LineTime -
+ dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
- dml_print("DML::%s: min_Lsw = %f\n", __func__, st_vars->min_Lsw);
+ dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
- dml_print("DML::%s: trip_to_mem = %f\n", __func__, st_vars->trip_to_mem);
+ dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
- dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, st_vars->prefetch_sw_bytes);
- dml_print("DML::%s: bytes_pp = %f\n", __func__, st_vars->bytes_pp);
+ dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
+ dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml_print("DML::%s: Tvm_trips = %f\n", __func__, st_vars->Tvm_trips);
- dml_print("DML::%s: Tr0_trips = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto = %f\n", __func__, st_vars->Tr0_oto);
- dml_print("DML::%s: Tvm_oto = %f\n", __func__, st_vars->Tvm_oto);
- dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, st_vars->Tvm_oto_lines);
- dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, st_vars->Tr0_oto_lines);
- dml_print("DML::%s: Lsw_oto = %f\n", __func__, st_vars->Lsw_oto);
- dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, st_vars->dst_y_prefetch_oto);
- dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, st_vars->dst_y_prefetch_equ);
+ dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
+ dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
+ dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
+ dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
+ dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
+ dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
+ dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
+ dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
#endif
- st_vars->dst_y_prefetch_equ = dml_floor(4.0 * (st_vars->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- st_vars->Tpre_rounded = st_vars->dst_y_prefetch_equ * st_vars->LineTime;
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+ Tpre_rounded = dst_y_prefetch_equ * LineTime;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, st_vars->dst_y_prefetch_equ);
- dml_print("DML::%s: LineTime: %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
+ dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
- __func__, VStartup * st_vars->LineTime);
+ __func__, VStartup * LineTime);
dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
- dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
__func__, *DSTYAfterScaler);
#endif
- st_vars->dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
+ dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
- if (st_vars->prefetch_sw_bytes < st_vars->dep_bytes)
- st_vars->prefetch_sw_bytes = 2 * st_vars->dep_bytes;
+ if (prefetch_sw_bytes < dep_bytes)
+ prefetch_sw_bytes = 2 * dep_bytes;
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -3665,61 +3712,61 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (st_vars->dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
- if (st_vars->Tpre_rounded - *Tno_bw > 0) {
+ if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - *Tno_bw);
- st_vars->Tsw_est1 = st_vars->prefetch_sw_bytes / PrefetchBandwidth1;
+ + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
+ Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (st_vars->Tsw_est1 / st_vars->LineTime < st_vars->min_Lsw)
- && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw > 0) {
+ if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
+ && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
- if (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded > 0)
- PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + st_vars->prefetch_sw_bytes) /
- (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
+ (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded > 0) {
+ if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded);
- st_vars->Tsw_est3 = st_vars->prefetch_sw_bytes / PrefetchBandwidth3;
+ + prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
+ Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup &&
- (st_vars->Tsw_est3 / st_vars->LineTime < st_vars->min_Lsw) && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 *
- st_vars->LineTime - st_vars->Tvm_trips_rounded > 0) {
+ (Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
+ LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - st_vars->Tvm_trips_rounded);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded > 0) {
- PrefetchBandwidth4 = st_vars->prefetch_sw_bytes /
- (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
+ PrefetchBandwidth4 = prefetch_sw_bytes /
+ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
} else {
PrefetchBandwidth4 = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: Tpre_rounded: %f\n", __func__, st_vars->Tpre_rounded);
+ dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
- dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, st_vars->Tvm_trips_rounded);
- dml_print("DML::%s: Tsw_est1: %f\n", __func__, st_vars->Tsw_est1);
- dml_print("DML::%s: Tsw_est3: %f\n", __func__, st_vars->Tsw_est3);
+ dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
+ dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
+ dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
@@ -3732,9 +3779,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth1 >= st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
@@ -3745,9 +3792,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth2 < st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
@@ -3758,9 +3805,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
- st_vars->Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
+ Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
- st_vars->Tr0_trips_rounded) {
+ Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
@@ -3770,80 +3817,80 @@ bool dml32_CalculatePrefetchSchedule(
}
if (Case1OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth1;
+ prefetch_bw_equ = PrefetchBandwidth1;
else if (Case2OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth2;
+ prefetch_bw_equ = PrefetchBandwidth2;
else if (Case3OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth3;
+ prefetch_bw_equ = PrefetchBandwidth3;
else
- st_vars->prefetch_bw_equ = PrefetchBandwidth4;
+ prefetch_bw_equ = PrefetchBandwidth4;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
- dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, st_vars->prefetch_bw_equ);
+ dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
- if (st_vars->prefetch_bw_equ > 0) {
+ if (prefetch_bw_equ > 0) {
if (GPUVMEnable == true) {
- st_vars->Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
- HostVMInefficiencyFactor / st_vars->prefetch_bw_equ,
- st_vars->Tvm_trips, st_vars->LineTime / 4);
+ Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor / prefetch_bw_equ,
+ Tvm_trips, LineTime / 4);
} else {
- st_vars->Tvm_equ = st_vars->LineTime / 4;
+ Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
- HostVMInefficiencyFactor) / st_vars->prefetch_bw_equ, st_vars->Tr0_trips,
- (st_vars->LineTime - st_vars->Tvm_equ) / 2, st_vars->LineTime / 4);
+ Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
+ HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
+ (LineTime - Tvm_equ) / 2, LineTime / 4);
} else {
- st_vars->Tr0_equ = (st_vars->LineTime - st_vars->Tvm_equ) / 2;
+ Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
- st_vars->Tvm_equ = 0;
- st_vars->Tr0_equ = 0;
+ Tvm_equ = 0;
+ Tr0_equ = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
#endif
}
}
- if (st_vars->dst_y_prefetch_oto < st_vars->dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_oto;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_oto;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_oto;
- *PrefetchBandwidth = st_vars->prefetch_bw_oto;
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ TimeForFetchingMetaPTE = Tvm_oto;
+ TimeForFetchingRowInVBlank = Tr0_oto;
+ *PrefetchBandwidth = prefetch_bw_oto;
} else {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_equ;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_equ;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_equ;
- *PrefetchBandwidth = st_vars->prefetch_bw_equ;
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
}
- *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * st_vars->TimeForFetchingMetaPTE / st_vars->LineTime, 1.0) / 4.0;
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank =
- dml_ceil(4.0 * st_vars->TimeForFetchingRowInVBlank / st_vars->LineTime, 1.0) / 4.0;
+ dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- st_vars->LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, st_vars->TimeForFetchingRowInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
- dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, st_vars->LinesToRequestPrefetchPixelData);
+ dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
- if (st_vars->LinesToRequestPrefetchPixelData >= 1 && st_vars->prefetch_bw_equ > 0) {
- *VRatioPrefetchY = (double) PrefetchSourceLinesY / st_vars->LinesToRequestPrefetchPixelData;
+ if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
@@ -3851,12 +3898,12 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max((double) PrefetchSourceLinesY /
- st_vars->LinesToRequestPrefetchPixelData,
+ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
@@ -3870,7 +3917,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
}
- *VRatioPrefetchC = (double) PrefetchSourceLinesC / st_vars->LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
@@ -3879,11 +3926,11 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
@@ -3898,25 +3945,25 @@ bool dml32_CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
- / st_vars->LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
- / st_vars->LineTime;
+ / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
+ / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
__func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
- st_vars->LinesToRequestPrefetchPixelData
+ LinesToRequestPrefetchPixelData
* myPipe->BytePerPixelC
- * swath_width_chroma_ub / st_vars->LineTime;
+ * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
- __func__, st_vars->LinesToRequestPrefetchPixelData);
+ __func__, LinesToRequestPrefetchPixelData);
#endif
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
@@ -3925,15 +3972,15 @@ bool dml32_CalculatePrefetchSchedule(
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
- (double)st_vars->LinesToRequestPrefetchPixelData * st_vars->LineTime +
- 2.0*st_vars->TimeForFetchingRowInVBlank + st_vars->TimeForFetchingMetaPTE);
- dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", st_vars->TimeForFetchingMetaPTE);
+ (double)LinesToRequestPrefetchPixelData * LineTime +
+ 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+ dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime);
+ (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * st_vars->LineTime -
- st_vars->TimeForFetchingMetaPTE - 2*st_vars->TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
- ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime - TWait - TCalc - *TSetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
+ TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
+ ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
PixelPTEBytesPerRow);
#endif
@@ -3941,7 +3988,7 @@ bool dml32_CalculatePrefetchSchedule(
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
- __func__, st_vars->dst_y_prefetch_equ);
+ __func__, dst_y_prefetch_equ);
#endif
}
@@ -3957,10 +4004,10 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
- (*DestinationLinesToRequestVMInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -3977,7 +4024,7 @@ bool dml32_CalculatePrefetchSchedule(
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
- (*DestinationLinesToRequestRowInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
@@ -4000,12 +4047,12 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
+ LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
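A detail worth calling out in the scheduling hunks above: prefetch destinations are snapped to quarter-line granularity. A minimal standalone sketch of the two rounding forms used, assuming dml_floor()/dml_ceil() round down/up to the given granularity (illustrative re-implementations, not the real DML helpers):

#include <math.h>

static double dml_floor(double a, double g) { return floor(a / g) * g; }	/* assumed behavior */
static double dml_ceil(double a, double g)  { return ceil(a / g) * g; }		/* assumed behavior */

/* dml_floor(4.0 * (x + 0.125), 1) / 4.0 rounds x to the nearest quarter
 * line, halves rounding up: 1.10 -> 1.00, 1.13 -> 1.25. */
static double round_to_quarter_line(double dst_y_prefetch)
{
	return dml_floor(4.0 * (dst_y_prefetch + 0.125), 1) / 4.0;
}

/* dml_ceil(4.0 * t / line_time, 1.0) / 4.0 converts a time into display
 * lines rounded up to the next quarter line, as done for the
 * DestinationLinesToRequest* outputs above. */
static double time_in_quarter_lines(double t, double line_time)
{
	return dml_ceil(4.0 * t / line_time, 1.0) / 4.0;
}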
@@ -4159,7 +4206,6 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
@@ -4221,15 +4267,37 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
double ActiveDRAMClockChangeLatencyMargin[])
{
unsigned int i, j, k;
-
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = 0;
- st_vars->DRAMClockChangeSupportNumber = 0;
- st_vars->DRAMClockChangeMethod = 0;
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
- st_vars->MinActiveFCLKChangeMargin = 0.;
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
- st_vars->TotalPixelBW = 0.0;
- st_vars->TotalActiveWriteback = 0;
+ unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
+ unsigned int DRAMClockChangeSupportNumber = 0;
+ unsigned int LastSurfaceWithoutMargin;
+ unsigned int DRAMClockChangeMethod = 0;
+ bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
+ double MinActiveFCLKChangeMargin = 0.;
+ double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
+ double ActiveClockChangeLatencyHidingY;
+ double ActiveClockChangeLatencyHidingC;
+ double ActiveClockChangeLatencyHiding;
+ double EffectiveDETBufferSizeY;
+ double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
+ double TotalPixelBW = 0.0;
+ bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double LinesInDETY[DC__NUM_DPP__MAX];
+ double LinesInDETC[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
+ double FullDETBufferingTimeY;
+ double FullDETBufferingTimeC;
+ double WritebackDRAMClockChangeLatencyMargin;
+ double WritebackFCLKChangeLatencyMargin;
+ double WritebackLatencyHiding;
+ bool SameTimingForFCLKChange;
+
+ unsigned int TotalActiveWriteback = 0;
+ unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
+ unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
@@ -4261,13 +4329,13 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
- st_vars->TotalActiveWriteback = 0;
+ TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (WritebackEnable[k] == true)
- st_vars->TotalActiveWriteback = st_vars->TotalActiveWriteback + 1;
+ TotalActiveWriteback = TotalActiveWriteback + 1;
}
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
@@ -4277,7 +4345,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
@@ -4307,14 +4375,14 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalPixelBW = st_vars->TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- st_vars->LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
@@ -4325,72 +4393,72 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
#endif
- st_vars->EffectiveLBLatencyHidingY = st_vars->LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveLBLatencyHidingC = st_vars->LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveDETBufferSizeY = DETBufferSizeY[k];
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
- st_vars->EffectiveDETBufferSizeY = st_vars->EffectiveDETBufferSizeY
+ EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
* (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / st_vars->TotalPixelBW;
+ / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
}
- st_vars->LinesInDETY[k] = (double) st_vars->EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
- st_vars->LinesInDETYRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETY[k], SwathHeightY[k]);
- st_vars->FullDETBufferingTimeY = st_vars->LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->EffectiveLBLatencyHidingY + st_vars->FullDETBufferingTimeY
+ ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->ActiveClockChangeLatencyHidingY
+ ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
/ PixelClock[k] / VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- st_vars->LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
- st_vars->LinesInDETCRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETC[k], SwathHeightC[k]);
- st_vars->FullDETBufferingTimeC = st_vars->LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
+ LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
/ VRatioChroma[k];
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->EffectiveLBLatencyHidingC + st_vars->FullDETBufferingTimeC
+ ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
/ PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->ActiveClockChangeLatencyHidingC
+ ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
/ PixelClock[k] / VRatioChroma[k];
}
- st_vars->ActiveClockChangeLatencyHiding = dml_min(st_vars->ActiveClockChangeLatencyHidingY,
- st_vars->ActiveClockChangeLatencyHidingC);
+ ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
+ ActiveClockChangeLatencyHidingC);
} else {
- st_vars->ActiveClockChangeLatencyHiding = st_vars->ActiveClockChangeLatencyHidingY;
+ ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->DRAMClockChangeWatermark;
- st_vars->ActiveFCLKChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->FCLKChangeWatermark;
- st_vars->USRRetrainingLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
if (WritebackEnable[k]) {
- st_vars->WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
+ WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
/ (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
/ (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64)
- st_vars->WritebackLatencyHiding = st_vars->WritebackLatencyHiding / 2;
+ WritebackLatencyHiding = WritebackLatencyHiding / 2;
- st_vars->WritebackDRAMClockChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackDRAMClockChangeWatermark;
- st_vars->WritebackFCLKChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
- st_vars->WritebackFCLKChangeLatencyMargin);
- st_vars->ActiveFCLKChangeLatencyMargin[k] = dml_min(st_vars->ActiveFCLKChangeLatencyMargin[k],
- st_vars->WritebackDRAMClockChangeLatencyMargin);
+ WritebackFCLKChangeLatencyMargin);
+ ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
@@ -4409,41 +4477,41 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
(DRRDisplay[i] || DRRDisplay[j]))) {
- st_vars->SynchronizedSurfaces[i][j] = true;
+ SynchronizedSurfaces[i][j] = true;
} else {
- st_vars->SynchronizedSurfaces[i][j] = false;
+ SynchronizedSurfaces[i][j] = false;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (!st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] < st_vars->MinActiveFCLKChangeMargin)) {
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
- st_vars->MinActiveFCLKChangeMargin = st_vars->ActiveFCLKChangeLatencyMargin[k];
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = k;
+ (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
+ ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
+ FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
+ MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
+ SurfaceWithMinActiveFCLKChangeMargin = k;
}
}
- *MinActiveFCLKChangeLatencySupported = st_vars->MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
+ *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
- st_vars->SameTimingForFCLKChange = true;
+ SameTimingForFCLKChange = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!st_vars->SynchronizedSurfaces[k][st_vars->SurfaceWithMinActiveFCLKChangeMargin]) {
+ if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->SameTimingForFCLKChange ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] <
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = st_vars->ActiveFCLKChangeLatencyMargin[k];
+ (SameTimingForFCLKChange ||
+ ActiveFCLKChangeLatencyMargin[k] <
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
}
- st_vars->SameTimingForFCLKChange = false;
+ SameTimingForFCLKChange = false;
}
}
- if (st_vars->MinActiveFCLKChangeMargin > 0) {
+ if (MinActiveFCLKChangeMargin > 0) {
*FCLKChangeSupport = dm_fclock_change_vactive;
- } else if ((st_vars->SameTimingForFCLKChange || st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
+ } else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
(PrefetchMode <= 1)) {
*FCLKChangeSupport = dm_fclock_change_vblank;
} else {
@@ -4453,7 +4521,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*USRRetrainingSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->USRRetrainingLatencyMargin[k] < 0)) {
+ (USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
@@ -4464,42 +4532,42 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
- st_vars->DRAMClockChangeSupportNumber = 2;
- } else if (st_vars->DRAMClockChangeSupportNumber == 0) {
- st_vars->DRAMClockChangeSupportNumber = 1;
- st_vars->LastSurfaceWithoutMargin = k;
- } else if (st_vars->DRAMClockChangeSupportNumber == 1 &&
- !st_vars->SynchronizedSurfaces[st_vars->LastSurfaceWithoutMargin][k]) {
- st_vars->DRAMClockChangeSupportNumber = 2;
+ DRAMClockChangeSupportNumber = 2;
+ } else if (DRAMClockChangeSupportNumber == 0) {
+ DRAMClockChangeSupportNumber = 1;
+ LastSurfaceWithoutMargin = k;
+ } else if (DRAMClockChangeSupportNumber == 1 &&
+ !SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
+ DRAMClockChangeSupportNumber = 2;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
- st_vars->DRAMClockChangeMethod = 1;
+ DRAMClockChangeMethod = 1;
else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
- st_vars->DRAMClockChangeMethod = 2;
+ DRAMClockChangeMethod = 2;
}
- if (st_vars->DRAMClockChangeMethod == 0) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeMethod == 0) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
- } else if (st_vars->DRAMClockChangeMethod == 1) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ } else if (DRAMClockChangeMethod == 1) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
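For readability, the mapping implemented by the two nested conditionals above, summarized as a comment (the method comes from UseMALLForPStateChange, the support number from the margin scan):

/*
 * DRAMClockChangeMethod x DRAMClockChangeSupportNumber:
 *   0 (no MALL):           0 -> vactive, 1 -> vblank, else unsupported
 *   1 (MALL full frame):   0 -> vactive_w_mall_full_frame,
 *                          1 -> vblank_w_mall_full_frame, else unsupported
 *   2 (MALL sub-viewport): 0 -> vactive_w_mall_sub_vp,
 *                          1 -> vblank_w_mall_sub_vp, else unsupported
 */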
@@ -4513,7 +4581,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
- src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + st_vars->LBLatencyHidingSourceLinesY[k];
+ src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
@@ -4521,7 +4589,7 @@ dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DET
dml_print("DML::%s: k=%d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%d, SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
-dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, st_vars->LBLatencyHidingSourceLinesY[k]);
+dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
@@ -4532,7 +4600,7 @@ dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l
if (BytePerPixelDETC[k] > 0) {
src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
- src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + st_vars->LBLatencyHidingSourceLinesC[k];
+ src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
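The common thread through the display_mode_vba_util_32.c hunks above: per-call scratch values move out of a caller-supplied struct and become ordinary stack locals, which is what lets the struct definitions and the extra parameter be deleted in the header hunks below. A minimal sketch of the before/after pattern, with illustrative names (the Tvm/Tr0 relation is the one used in the GPUVM-disabled path above):

struct scratch_old {
	double Tvm_equ;
	double Tr0_equ;
};

/* Before: temporaries routed through a shared scratch struct. */
static double calc_old(struct scratch_old *st_vars, double line_time)
{
	st_vars->Tvm_equ = line_time / 4.0;
	st_vars->Tr0_equ = (line_time - st_vars->Tvm_equ) / 2.0;
	return st_vars->Tr0_equ;
}

/* After: plain locals; no shared state, no extra parameter. */
static double calc_new(double line_time)
{
	double Tvm_equ = line_time / 4.0;		/* was st_vars->Tvm_equ */
	double Tr0_equ = (line_time - Tvm_equ) / 2.0;	/* was st_vars->Tr0_equ */

	return Tr0_equ;
}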
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 37a314ce284b..d293856ba906 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,7 +30,6 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
-#include "dml/display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -82,7 +81,6 @@ void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
double *DPPCLKUsingSingleDPP);
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -362,7 +360,6 @@ void dml32_CalculateSurfaceSizeInMall(
bool *ExceededMALLSize);
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -715,7 +712,6 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -811,7 +807,6 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 84b4b00f29cb..c87091683b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -498,6 +498,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_21_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
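The new fclk block above follows the established bounding-box override pattern: the SoC value is kept in microseconds, the debugfs override in nanoseconds, and a nonzero override that differs from the current value (compared in ns) wins. A sketch with an illustrative helper name; note the kernel code uses an integer divide, so sub-microsecond remainders are dropped:

static void apply_latency_override(double *soc_latency_us, int override_ns)
{
	if ((int)(*soc_latency_us * 1000) != override_ns && override_ns)
		*soc_latency_us = override_ns / 1000;	/* integer divide, as above */
}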
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 8460aefe7b6d..492aec634b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -182,108 +182,6 @@ void Calculate256BBlockSizes(
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC);
-struct dml32_CalculateSwathAndDETConfiguration {
- unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
- unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpSwathSizeBytesY;
- unsigned int RoundedUpSwathSizeBytesC;
- double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
- double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
- unsigned int TotalActiveDPP;
- bool NoChromaSurfaces;
- unsigned int DETBufferSizeInKByteForSwathCalculation;
-};
-
-struct dml32_CalculateVMRowAndSwath {
- unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
- unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
- unsigned int PDEAndMetaPTEBytesFrameY;
- unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
- bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport {
- unsigned int SurfaceWithMinActiveFCLKChangeMargin;
- unsigned int DRAMClockChangeSupportNumber;
- unsigned int LastSurfaceWithoutMargin;
- unsigned int DRAMClockChangeMethod;
- bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin;
- double MinActiveFCLKChangeMargin;
- double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank;
- double ActiveClockChangeLatencyHidingY;
- double ActiveClockChangeLatencyHidingC;
- double ActiveClockChangeLatencyHiding;
- double EffectiveDETBufferSizeY;
- double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
- double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
- double TotalPixelBW;
- bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
- double EffectiveLBLatencyHidingY;
- double EffectiveLBLatencyHidingC;
- double LinesInDETY[DC__NUM_DPP__MAX];
- double LinesInDETC[DC__NUM_DPP__MAX];
- unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
- double FullDETBufferingTimeY;
- double FullDETBufferingTimeC;
- double WritebackDRAMClockChangeLatencyMargin;
- double WritebackFCLKChangeLatencyMargin;
- double WritebackLatencyHiding;
- bool SameTimingForFCLKChange;
- unsigned int TotalActiveWriteback;
- unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
- unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculatePrefetchSchedule {
- unsigned int DPPCycles, DISPCLKCycles;
- double DSTTotalPixelsAfterScaler;
- double LineTime;
- double dst_y_prefetch_equ;
- double prefetch_bw_oto;
- double Tvm_oto;
- double Tr0_oto;
- double Tvm_oto_lines;
- double Tr0_oto_lines;
- double dst_y_prefetch_oto;
- double TimeForFetchingMetaPTE;
- double TimeForFetchingRowInVBlank;
- double LinesToRequestPrefetchPixelData;
- unsigned int HostVMDynamicLevelsTrips;
- double trip_to_mem;
- double Tvm_trips;
- double Tr0_trips;
- double Tvm_trips_rounded;
- double Tr0_trips_rounded;
- double Lsw_oto;
- double Tpre_rounded;
- double prefetch_bw_equ;
- double Tvm_equ;
- double Tr0_equ;
- double Tdmbf;
- double Tdmec;
- double Tdmsks;
- double prefetch_sw_bytes;
- double bytes_pp;
- double dep_bytes;
- unsigned int max_vratio_pre;
- double min_Lsw;
- double Tsw_est1;
- double Tsw_est3;
-};
-
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation {
unsigned int dummy_integer_array[2][DC__NUM_DPP__MAX];
double dummy_single_array[2][DC__NUM_DPP__MAX];
@@ -355,10 +253,6 @@ struct dummy_vars {
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation;
struct dml32_ModeSupportAndSystemConfigurationFull dml32_ModeSupportAndSystemConfigurationFull;
- struct dml32_CalculateSwathAndDETConfiguration dml32_CalculateSwathAndDETConfiguration;
- struct dml32_CalculateVMRowAndSwath dml32_CalculateVMRowAndSwath;
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport;
- struct dml32_CalculatePrefetchSchedule dml32_CalculatePrefetchSchedule;
};
struct vba_vars_st {
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index ab06c7fc7452..9f3558c0ef11 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -244,13 +244,15 @@ enum {
#define ASICREV_IS_GC_10_3_7(eChipRev) ((eChipRev >= GC_10_3_7_A0) && (eChipRev < GC_10_3_7_UNKNOWN))
#define AMDGPU_FAMILY_GC_11_0_0 145
-#define AMDGPU_FAMILY_GC_11_0_2 148
+#define AMDGPU_FAMILY_GC_11_0_1 148
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
+#define GC_11_0_3_A0 0x20
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
-#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
+#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
/*
* ASIC chip ID
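The ASICREV_IS_* checks carve eChipRev into half-open windows, so introducing GC_11_0_3_A0 (0x20) splits the old GC 11.0.2 window and each revision lands in exactly one bucket:

/*
 * Windows after this change (half-open intervals over eChipRev):
 *   GC 11.0.0: [0x00, 0x10)
 *   GC 11.0.2: [0x10, 0x20)
 *   GC 11.0.3: [0x20, 0xFF)
 */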
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f093b49c5e6e..3bf08a60c45c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -119,13 +119,15 @@ enum dc_log_type {
LOG_HDMI_RETIMER_REDRIVER,
LOG_DSC,
LOG_SMU_MSG,
+ LOG_DC2RESERVED4,
+ LOG_DC2RESERVED5,
LOG_DWB,
LOG_GAMMA_DEBUG,
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
LOG_DP2,
- LOG_SECTION_TOTAL_COUNT
+ LOG_DC2RESERVED12,
};
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
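Swapping LOG_SECTION_TOTAL_COUNT for reserved entries keeps the numeric values of the existing dc_log_type entries stable; each entry is used as a bit index when building log masks (as DC_MIN_LOG_MASK shows), so renumbering would silently change which messages a mask enables. An illustrative helper in the same spirit:

static int log_type_enabled(unsigned long long mask, unsigned int log_type)
{
	return (mask >> log_type) & 1ull;	/* log_type is a bit index */
}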
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index da09ba7589f7..0f39ab9dc5b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -613,10 +613,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
-
- /* FreeSync HDR */
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
@@ -684,10 +680,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
/* PB16 : Reserved bits 7:1, FixedRate bit 0 */
infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
-
- //FreeSync HDR
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
@@ -772,8 +764,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
infopacket->hb2 = 0x09;
- *payload_size = 0x0A;
-
+ *payload_size = 0x09;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
@@ -822,9 +813,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal,
infopacket->hb1 = version;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */
- *payload_size = 0x10;
- infopacket->hb2 = *payload_size - 1; //-1 for checksum
+ infopacket->hb2 = 0x10;
+ *payload_size = 0x10;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
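Both header fixes above restore the invariant that hb2 (the infopacket length field) and *payload_size count the same bytes: the v2 path stops counting the checksum byte in the payload (0x0A -> 0x09, matching hb2), and the v3 path stops subtracting one from hb2 (both now 0x10). A hypothetical consistency check, not part of the patch:

#include <assert.h>

static void check_infopacket_sizes(unsigned char hb2, unsigned int payload_size)
{
	assert(hb2 == payload_size);	/* length field matches bytes filled */
}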
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
index 2ed95790a600..cf8d60c4df1b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
@@ -15243,6 +15243,8 @@
#define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX 5
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS 0x420186
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 5
+#define regBIF0_PCIE_TX_POWER_CTRL_1 0x420187
+#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF0_PCIE_TX_CTRL_4 0x42018b
#define regBIF0_PCIE_TX_CTRL_4_BASE_IDX 5
#define regBIF0_PCIE_TX_STATUS 0x420194
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
index eb62a18fcc48..3d60c9e92548 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
@@ -85627,6 +85627,19 @@
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
+//BIF0_PCIE_TX_POWER_CTRL_1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT 0x0
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT 0x1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT 0x2
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT 0x3
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT 0x4
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT 0x5
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK 0x00000002L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK 0x00000004L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK 0x00000010L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK 0x00000020L
//BIF0_PCIE_TX_CTRL_4
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
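The new BIF0_PCIE_TX_POWER_CTRL_1 fields follow the usual shift/mask convention of these generated register headers: one __SHIFT and one _MASK define per field. A small sketch of how such pairs are typically consumed (generic helpers, not from this file):

#include <stdint.h>

static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}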
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 78620b0bd279..f745cd8f1ab7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x23
-
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x22
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -1193,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[61];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent;
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t TempPadding;
uint8_t PcieRate ;
uint8_t PcieWidth ;
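The SkuTable additions above are size-neutral by construction: the 12 new bytes (1 + 3 padding + 4 x int16_t) are paid for by shrinking the reserved words from Spare[64] to Spare[61]. A compile-time check in this style (hypothetical, not in the patch) makes the invariant explicit:

#include <stdint.h>

struct sku_tail_old {
	uint32_t Spare[64];
};

struct sku_tail_new {
	uint8_t  TotalBoardPowerSupport;
	uint8_t  TotalBoardPowerPadding[3];
	int16_t  TotalIdleBoardPowerM;
	int16_t  TotalIdleBoardPowerB;
	int16_t  TotalBoardPowerM;
	int16_t  TotalBoardPowerB;
	uint32_t Spare[61];
};

_Static_assert(sizeof(struct sku_tail_old) == sizeof(struct sku_tail_new),
	       "firmware table layout must keep its size");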
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 76f695a1d065..ae2d337158f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
typedef struct {
int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+ uint16_t spare2;
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index c02e5e576728..ac308e72241a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,9 +28,9 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
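These constants are the driver-side halves of the interface-version handshake: SMU_V13_0_4 moves 0x04 -> 0x05 in lockstep with the PMFW_DRIVER_IF_VERSION bump in smu13_driver_if_v13_0_4.h above, and SMU_V13_0_0 moves to 0x2E alongside the reworked v13.0.0 tables. A sketch of the kind of probe-time guard this pairing enables (illustrative, not the driver's actual check):

static int check_driver_if_version(unsigned int fw_if_version,
				   unsigned int drv_if_version)
{
	/* A mismatch means the shared metrics/pptable layouts may differ. */
	return fw_if_version == drv_if_version ? 0 : -1;
}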
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index fa520d79ef67..6db67f082d91 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4283,6 +4283,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e8fe84f806d1..18ee3b5e64c5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -212,6 +212,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@@ -219,16 +222,10 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3667)
- pptable_id = 36671;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3688)
- pptable_id = 36881;
/*
* Temporary solution for SMU V13.0.0 with SCPM enabled:
* - use 36831 signed pptable when pp_table_id is 3683
+ * - use 37151 signed pptable when pp_table_id is 3715
* - use 36641 signed pptable when pp_table_id is 3664 or 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -241,6 +238,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
case 3683:
pptable_id = 36831;
break;
+ case 3715:
+ pptable_id = 37151;
+ break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
return -EINVAL;
@@ -478,7 +478,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
/*
* Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664 or 3683 on request
+ * - use 3664, 3683 or 3715 on request
* - use 3664 when pptable_id is 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -489,6 +489,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
break;
case 3664:
case 3683:
+ case 3715:
break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
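Editor's note: a standalone sketch of the signed-pptable mapping described in the comments above, for the SCPM-enabled case; the helper name and the -1 error convention are ours, not the driver's:

#include <assert.h>

static int signed_pptable_id(int pptable_id)
{
	switch (pptable_id) {
	case 0:
	case 3664: return 36641;
	case 3683: return 36831;
	case 3715: return 37151;
	default:   return -1;	/* unsupported id */
	}
}

int main(void)
{
	assert(signed_pptable_id(3715) == 37151);
	assert(signed_pptable_id(0) == 36641);
	assert(signed_pptable_id(9999) == -1);
	return 0;
}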
@@ -2344,8 +2345,8 @@ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
-
- return smu_cmn_send_msg_without_waiting(smu, index, 0);
+ /* Pass param 1 to tell PMFW to enable the GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1bbeceeb9e3c..df4a47acd724 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1792,7 +1792,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 82d3718d8324..97e1d55dcaad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -71,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
@@ -199,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -226,18 +228,6 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- /* allow message will be sent after enable message */
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
- if (ret)
- dev_err(adev->dev, "Failed to Enable GfxOff!\n");
- return ret;
-}
-
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1026,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
- .post_init = smu_v13_0_4_post_smu_init,
.mode2_reset = smu_v13_0_4_mode2_reset,
.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 47360ef5c175..66445964efbd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -176,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9dd56e73218b..1016d1c216d8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1567,6 +1567,16 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return true;
+}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1574,7 +1584,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
@@ -1624,6 +1636,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
};
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 702ea803a743..39e7004de720 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -180,7 +180,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
of_node_put(bus_node);
if (ret == -ENODEV) {
dev_warn(dev, "missing 'data-mapping' DT property\n");
- } else if (ret) {
+ } else if (ret < 0) {
dev_err(dev, "invalid 'data-mapping' DT property\n");
return ret;
} else {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 281f8a9ba4fd..7ab38d734ad6 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -550,8 +550,9 @@ static int sii902x_audio_hw_params(struct device *dev, void *data,
unsigned long mclk_rate;
int i, ret;
- if (daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_dbg(dev, "%s: I2S master mode not supported\n", __func__);
+ if (daifmt->bit_clk_provider || daifmt->frame_clk_provider) {
+ dev_dbg(dev, "%s: I2S clock provider mode not supported\n",
+ __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index f50b47ac11a8..a2f0860b20bb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -45,7 +45,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
u8 inputclkfs = 0;
/* only I2S is supported here */
- if (fmt->bit_clk_master | fmt->frame_clk_master) {
+ if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
dev_err(dev, "unsupported clock settings\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index eb0c2d041f13..ad068865ba20 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -1226,7 +1211,7 @@ retry:
ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
@@ -1251,7 +1236,7 @@ retry:
goto retry;
}
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 8ad0e02991ca..904fc893c905 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -302,6 +302,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (WARN_ON(map->is_iomem)) {
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
ret = -EIO;
goto err_put_pages;
}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a3f180653b8b..eb09e86044c6 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 7655142a4651..10b0036f8a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1594,12 +1594,12 @@ static int hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_context *hdata = dev_get_drvdata(dev);
if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 7c4455541dbb..f8eb6f69be05 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1096,11 +1096,11 @@ static int tda998x_audio_hw_params(struct device *dev, void *data,
if (!spdif &&
(daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master)) {
+ daifmt->bit_clk_provider || daifmt->frame_clk_provider)) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index a0f84cbe974f..fc5d94862ef3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -27,7 +27,6 @@
#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7b2c14fd9e1..cd75b0ca2555 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -6,7 +6,6 @@
#include <linux/dma-resv.h>
#include <linux/highmem.h>
-#include <linux/intel-iommu.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ccec4055fde3..389e9f157ca5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- assert_object_held(obj);
+ assert_object_held_shared(obj);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
continue;
}
- if (!i915_gem_object_trylock(obj, NULL)) {
- /* busy, toss it back to the pile */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
- continue;
- }
-
__i915_gem_object_pages_fini(obj);
- i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work.work);
+ container_of(work, struct drm_i915_private, mm.free_work);
i915_gem_flush_free_objects(i915);
}
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+ queue_work(i915->wq, &i915->mm.free_work);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -745,7 +737,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5cf36a130061..9f6b14ec189a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
-#define I915_BO_WAS_BOUND_BIT 10
/**
* @mem_flags - Mutable placement-related flags
*
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
+
+ u32 tlb;
} mm;
struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 97c820eee115..8357dbdcab5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,14 +6,15 @@
#include <drm/drm_cache.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
-#include "gt/intel_gt.h"
-
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
vunmap(ptr);
}
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!obj->mm.tlb)
+ return;
+
+ intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+ obj->mm.tlb = 0;
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
- if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
- intel_gt_invalidate_tlbs(to_gt(i915));
- }
+ flush_tlb_invalidate(obj);
return pages;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1030053571a2..8dc5c8874d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -426,7 +426,8 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
i915->mm.shrinker.seeks = DEFAULT_SEEKS;
i915->mm.shrinker.batch = 4096;
- drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
+ drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
+ "drm-i915_gem"));
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 68c2b0d8f187..f435e06125aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -11,7 +11,9 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
@@ -36,8 +38,6 @@ static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);
- mutex_init(&gt->tlb_invalidate_lock);
-
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -48,6 +48,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
@@ -768,6 +770,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
+ mutex_destroy(&gt->tlb.invalidate_lock);
intel_engines_free(gt);
}
}
@@ -906,7 +909,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
return rb;
}
-void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+static void mmio_invalidate_full(struct intel_gt *gt)
{
static const i915_reg_t gen8_regs[] = {
[RENDER_CLASS] = GEN8_RTCR,
@@ -924,13 +927,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
- return;
-
if (GRAPHICS_VER(i915) == 12) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
@@ -945,28 +946,41 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
"Platform does not implement TLB invalidation!"))
return;
- GEM_TRACE("\n");
-
- assert_rpm_wakelock_held(&i915->runtime_pm);
-
- mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ awake = 0;
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ awake |= engine->mask;
}
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
spin_unlock_irq(&uncore->lock);
- for_each_engine(engine, gt, id) {
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ struct reg_and_bit rb;
+
/*
* HW architecture suggests a typical invalidation time of 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -974,12 +988,8 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
*/
const unsigned int timeout_us = 100;
const unsigned int timeout_ms = 4;
- struct reg_and_bit rb;
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
@@ -996,5 +1006,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
- mutex_unlock(&gt->tlb_invalidate_lock);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 82d6f248d876..40b06adf509a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
-void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
#endif /* __INTEL_GT_H__ */
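Editor's note: a standalone sketch of the odd/even seqno scheme these helpers implement, as we read it. A full invalidation advances the counter by 2 (it stays even), a pending unbind records the current value with bit 0 set via intel_gt_next_invalidate_tlb_full(), and the wrap-safe signed comparison only reports "passed" once a full barrier has completed strictly after the record point. Constants and asserts below are ours for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ALIGN2(x) (((x) + 1u) & ~1u)	/* round up to the next even value */

static bool tlb_seqno_passed(uint32_t cur, uint32_t seqno)
{
	/* The signed difference keeps the test correct across u32 wraparound. */
	return (int32_t)(cur - ALIGN2(seqno)) > 0;
}

int main(void)
{
	uint32_t cur = 100;		/* even: no invalidation in flight */
	uint32_t pending = cur | 1;	/* an unbind records an odd seqno */

	assert(!tlb_seqno_passed(cur, pending));
	cur += 2;			/* one barrier: may have raced, not trusted */
	assert(!tlb_seqno_passed(cur, pending));
	cur += 2;			/* a second, fully ordered barrier */
	assert(tlb_seqno_passed(cur, pending));

	cur = UINT32_MAX - 1;		/* 0xfffffffe */
	pending = cur | 1;		/* ALIGN2() wraps to 0 */
	cur += 4;			/* wraps to 2 */
	assert(tlb_seqno_passed(cur, pending));
	return 0;
}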
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index bc898df7a48c..a334787a4939 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,9 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
+#define with_intel_gt_pm_if_awake(gt, wf) \
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
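Editor's note: a standalone sketch of the one-shot for-loop scoping idiom behind with_intel_gt_pm_if_awake(): the body runs at most once and only if the acquire succeeded, and the release expression runs when the loop's increment is evaluated (note a bare break inside the body would skip it, as with the kernel macro). The names below are ours:

#include <stdio.h>

static int acquire(void)  { puts("acquire"); return 1; }
static void release(void) { puts("release"); }

#define with_resource(tok) \
	for ((tok) = acquire(); (tok); release(), (tok) = 0)

int main(void)
{
	int wf;

	with_resource(wf) {
		printf("body runs with wf=%d\n", wf);
	}
	return 0;
}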
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index df708802889d..3804a583382b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -11,6 +11,7 @@
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -83,7 +84,22 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gsc gsc;
- struct mutex tlb_invalidate_lock;
+ struct {
+ /* Serialize global tlb invalidations */
+ struct mutex invalidate_lock;
+
+ /*
+ * Batch TLB invalidations
+ *
+ * After unbinding the PTE, we need to ensure the TLBs
+ * are invalidated prior to releasing the physical pages.
+ * But we only need one such invalidation for all unbinds,
+ * so we track how many TLB invalidations have been
+ * performed since unbinding the PTE and only emit an extra
+ * invalidate if no full barrier has passed since then.
+ */
+ seqcount_mutex_t seqno;
+ } tlb;
struct i915_wa_list wa_list;
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2c35324b5f68..2b10b96b17b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -708,7 +708,7 @@ intel_context_migrate_copy(struct intel_context *ce,
u8 src_access, dst_access;
struct i915_request *rq;
int src_sz, dst_sz;
- bool ccs_is_src;
+ bool ccs_is_src, overwrite_ccs;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +749,8 @@ intel_context_migrate_copy(struct intel_context *ce,
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}
+ overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -852,6 +854,25 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
+ } else if (overwrite_ccs) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * While we can't always restore/manage the CCS state,
+ * we still need to ensure we don't leak the CCS state
+ * from the previous user, so make sure we overwrite it
+ * with something.
+ */
+ err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
}
/* Arbitration is re-enabled between requests. */
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d8b94d638559..6ee8d1127016 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -206,8 +206,12 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
- if (vma_res->allocated)
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (!vma_res->allocated)
+ return;
+
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (vma_res->tlb)
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6e90032e12e9..aa6aed837194 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+#ifdef CONFIG_64BIT
static void _release_bars(struct pci_dev *pdev)
{
int resno;
@@ -111,6 +112,9 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
}
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
static int
region_lmem_release(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index aee1a45da74b..705689e64011 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -226,7 +226,6 @@ struct intel_vgpu {
unsigned long nr_cache_entries;
struct mutex cache_lock;
- struct notifier_block iommu_notifier;
atomic_t released;
struct kvm_page_track_notifier_node track_node;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e2f6c56ab342..e3cd58946477 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -231,57 +231,38 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int total_pages;
- int npage;
- int ret;
-
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
-
- for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
-
- ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
- drm_WARN_ON(&i915->drm, ret != 1);
- }
+ vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
+ DIV_ROUND_UP(size, PAGE_SIZE));
}
/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
- unsigned long base_pfn = 0;
- int total_pages;
+ int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct page *base_page = NULL;
int npage;
int ret;
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
/*
* We pin the pages one-by-one to avoid allocating a big array
* on the stack to hold pfns.
*/
for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
- unsigned long pfn;
+ dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
+ struct page *cur_page;
- ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
+ IOMMU_READ | IOMMU_WRITE, &cur_page);
if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
- cur_gfn, ret);
- goto err;
- }
-
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- npage++;
- ret = -EFAULT;
+ gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
+ &cur_iova, ret);
goto err;
}
if (npage == 0)
- base_pfn = pfn;
- else if (base_pfn + npage != pfn) {
+ base_page = cur_page;
+ else if (base_page + npage != cur_page) {
gvt_vgpu_err("The pages are not continuous\n");
ret = -EINVAL;
npage++;
@@ -289,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
}
}
- *page = pfn_to_page(base_pfn);
+ *page = base_page;
return 0;
err:
gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
@@ -729,34 +710,25 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
return ret;
}
-static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
+ u64 length)
{
- struct intel_vgpu *vgpu =
- container_of(nb, struct intel_vgpu, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- struct gvt_dma *entry;
- unsigned long iov_pfn, end_iov_pfn;
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct gvt_dma *entry;
+ u64 iov_pfn = iova >> PAGE_SHIFT;
+ u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;
- iov_pfn = unmap->iova >> PAGE_SHIFT;
- end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
+ mutex_lock(&vgpu->cache_lock);
+ for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+ entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+ if (!entry)
+ continue;
- mutex_lock(&vgpu->cache_lock);
- for (; iov_pfn < end_iov_pfn; iov_pfn++) {
- entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
- if (!entry)
- continue;
-
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
- entry->size);
- __gvt_cache_remove_entry(vgpu, entry);
- }
- mutex_unlock(&vgpu->cache_lock);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
}
-
- return NOTIFY_OK;
+ mutex_unlock(&vgpu->cache_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
@@ -783,36 +755,20 @@ out:
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- unsigned long events;
- int ret;
-
- vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
- &vgpu->iommu_notifier);
- if (ret != 0) {
- gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
- ret);
- goto out;
- }
- ret = -EEXIST;
if (vgpu->attached)
- goto undo_iommu;
+ return -EEXIST;
- ret = -ESRCH;
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- goto undo_iommu;
+ return -ESRCH;
}
kvm_get_kvm(vgpu->vfio_device.kvm);
- ret = -EEXIST;
if (__kvmgt_vgpu_exist(vgpu))
- goto undo_iommu;
+ return -EEXIST;
vgpu->attached = true;
@@ -831,12 +787,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
atomic_set(&vgpu->released, 0);
return 0;
-
-undo_iommu:
- vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
-out:
- return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
@@ -853,8 +803,6 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int ret;
if (!vgpu->attached)
return;
@@ -864,11 +812,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
- drm_WARN(&i915->drm, ret,
- "vfio_unregister_notifier for iommu failed: %d\n", ret);
-
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -1610,6 +1553,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
+ .dma_unmap = intel_vgpu_dma_unmap,
};
static int intel_vgpu_probe(struct mdev_device *mdev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d25647be25d1..086bbe8945d6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -247,7 +247,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct delayed_work free_work;
+ struct work_struct free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1378,7 +1378,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_delayed_work(&i915->mm.free_work);
+ flush_work(&i915->mm.free_work);
flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ef3b04c7e153..260371716490 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -538,8 +538,6 @@ int i915_vma_bind(struct i915_vma *vma,
bind_flags);
}
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
-
atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -1310,6 +1308,19 @@ err_unpin:
return err;
}
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+{
+ /*
+ * Before we release the pages that were bound by this vma, we
+ * must invalidate all the TLBs that may still have a reference
+ * back to our physical address. It only needs to be done once,
+ * so after updating the PTE to point away from the pages, record
+ * the most recent TLB invalidation seqno, and if we have not yet
+ * flushed the TLBs upon release, perform a full invalidation.
+ */
+ WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+}
+
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
@@ -1941,7 +1952,12 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
- unbind_fence = i915_vma_resource_unbind(vma_res);
+ if (async)
+ unbind_fence = i915_vma_resource_unbind(vma_res,
+ &vma->obj->mm.tlb);
+ else
+ unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
+
vma->resource = NULL;
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
@@ -1949,10 +1965,13 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
i915_vma_detach(vma);
- if (!async && unbind_fence) {
- dma_fence_wait(unbind_fence, false);
- dma_fence_put(unbind_fence);
- unbind_fence = NULL;
+ if (!async) {
+ if (unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+ vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 88ca0bd9c900..33a58f605d75 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -213,6 +213,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 27c55027387a..5a67995ea5fe 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -223,10 +223,13 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
* Return: A refcounted pointer to a dma-fence that signals when unbinding is
* complete.
*/
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb)
{
struct i915_address_space *vm = vma_res->vm;
+ vma_res->tlb = tlb;
+
/* Reference for the sw fence */
i915_vma_resource_get(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 5d8427caa2ba..06923d1816e7 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -67,6 +67,7 @@ struct i915_page_sizes {
* taken when the unbind is scheduled.
* @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
* needs to be skipped for unbind.
+ * @tlb: pointer to obj->mm.tlb for an async unbind; otherwise NULL.
*
* The lifetime of a struct i915_vma_resource is from a binding request until
* the possibly asynchronous unbind has completed.
@@ -119,6 +120,8 @@ struct i915_vma_resource {
bool immediate_unbind:1;
bool needs_wakeref:1;
bool skip_pte_rewrite:1;
+
+ u32 *tlb;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
@@ -131,7 +134,8 @@ struct i915_vma_resource *i915_vma_resource_alloc(void);
void i915_vma_resource_free(struct i915_vma_resource *vma_res);
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb);
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 9b84df34a6a1..8cf3352d8858 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
drm_kms_helper_poll_init(drm);
- drm_bridge_connector_enable_hpd(kms->connector);
-
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_crtc;
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 8989e215dfc9..011be7ff51e1 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -111,6 +111,12 @@ int lima_devfreq_init(struct lima_device *ldev)
struct dev_pm_opp *opp;
unsigned long cur_freq;
int ret;
+ const char *regulator_names[] = { "mali", NULL };
+ const char *clk_names[] = { "core", NULL };
+ struct dev_pm_opp_config config = {
+ .regulator_names = regulator_names,
+ .clk_names = clk_names,
+ };
if (!device_property_present(dev, "operating-points-v2"))
/* Optional, continue without devfreq */
@@ -118,11 +124,7 @@ int lima_devfreq_init(struct lima_device *ldev)
spin_lock_init(&ldevfreq->lock);
- ret = devm_pm_opp_set_clkname(dev, "core");
- if (ret)
- return ret;
-
- ret = devm_pm_opp_set_regulators(dev, (const char *[]){ "mali" }, 1);
+ ret = devm_pm_opp_set_config(dev, &config);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV)
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 1b70938cfd2c..bd4ca11d3ff5 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -115,8 +115,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 259f3e6bec90..bb7e109534de 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -469,17 +469,17 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
- VIU_OSD_BLEND_REORDER(1, 0) |
- VIU_OSD_BLEND_REORDER(2, 0) |
- VIU_OSD_BLEND_REORDER(3, 0) |
- VIU_OSD_BLEND_DIN_EN(1) |
- VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
- VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
- VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
- VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
- VIU_OSD_BLEND_HOLD_LINES(4),
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
+ (u32)VIU_OSD_BLEND_REORDER(1, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(2, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(3, 0) |
+ (u32)VIU_OSD_BLEND_DIN_EN(1) |
+ (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ (u32)VIU_OSD_BLEND_HOLD_LINES(4);
+ writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 6e39d959b9f0..0317055e3253 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -221,7 +221,7 @@ void msm_gem_shrinker_init(struct drm_device *dev)
priv->shrinker.count_objects = msm_gem_shrinker_count;
priv->shrinker.scan_objects = msm_gem_shrinker_scan;
priv->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&priv->shrinker));
+ WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 05076e530e7d..e29175e4b44c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -820,6 +820,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+ * Without this the operation can time out and we'll fall back to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 568182e68dd7..d8cf71fb0512 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2605,6 +2605,27 @@ nv172_chipset = {
};
static const struct nvkm_device_chip
+nv173_chipset = {
+ .name = "GA103",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.bar = { 0x00000001, tu102_bar_new },
@@ -3067,6 +3088,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 194af7f607a6..5110cd9b2425 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -101,8 +101,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return 0;
}
- ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
- pfdev->comp->num_supplies);
+ ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 2d870cf73b07..2fa5afe21288 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -626,24 +626,29 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
-static const char * const default_supplies[] = { "mali" };
+/*
+ * The OPP core wants the supply names to be NULL terminated, but we need the
+ * correct num_supplies value for regulator core. Hence, we NULL terminate here
+ * and then initialize num_supplies with ARRAY_SIZE - 1.
+ */
+static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.num_pm_domains = 1, /* optional */
.pm_domain_names = NULL,
};
static const struct panfrost_compatible amlogic_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
-static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
+ .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
.supply_names = mediatek_mt8183_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
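Editor's note: the sentinel-plus-count pattern the new comment describes, shown in isolation (illustrative only):

#include <assert.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const supplies[] = { "mali", "sram", NULL };

int main(void)
{
	assert(ARRAY_SIZE(supplies) == 3);	/* storage incl. NULL sentinel */
	assert(ARRAY_SIZE(supplies) - 1 == 2);	/* regulators actually used */
	assert(supplies[ARRAY_SIZE(supplies) - 1] == NULL);
	return 0;
}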
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 77e7cb6d1ae3..bf0170782f25 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -103,7 +103,7 @@ void panfrost_gem_shrinker_init(struct drm_device *dev)
pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
pfdev->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&pfdev->shrinker));
+ WARN_ON(register_shrinker(&pfdev->shrinker, "drm-panfrost"));
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2b12389f841a..ee0165687239 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1605,6 +1605,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 61a034a01764..cb82622877d2 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1176,12 +1176,12 @@ static int hdmi_audio_hw_params(struct device *dev,
DRM_DEBUG_DRIVER("\n");
if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index b4dfa166eccd..34234a144e87 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* (4 bytes). Its minimal size is therefore 10 bytes
*/
#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* therefore 6 bytes
*/
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* 16 bytes
*/
#define HFP_PACKET_OVERHEAD 16
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
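Editor's note: why the Bpp type change from unsigned int to int matters. With unsigned arithmetic, a negative intermediate wraps to a huge value and max() silently picks it; with signed arithmetic, the packet-overhead floor applies as intended. A standalone sketch, where the MAX macro and sample values are ours:

#include <assert.h>

#define HSA_PACKET_OVERHEAD 10
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int sync_len = 2;	/* hsync_end - hsync_start, in pixels */
	unsigned int ubpp = 3;
	int sbpp = 3;

	/* 2 * 3 - 10 wraps to 4294967292 when computed as unsigned ... */
	assert(MAX((unsigned int)HSA_PACKET_OVERHEAD,
		   sync_len * ubpp - HSA_PACKET_OVERHEAD) > 1000u);

	/* ... but clamps to the 10-byte overhead floor when signed. */
	assert(MAX(HSA_PACKET_OVERHEAD,
		   sync_len * sbpp - HSA_PACKET_OVERHEAD) == 10);
	return 0;
}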
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e210df65c30..97184c333526 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -912,7 +912,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1bba0a0ed3f9..21b61631f73a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -722,7 +722,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker.count_objects = ttm_pool_shrinker_count;
mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
mm_shrinker.seeks = 1;
- return register_shrinker(&mm_shrinker);
+ return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}
/**
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 061be9a6619d..b0f3117102ca 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -8,6 +8,7 @@ config DRM_VC4
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
+ depends on PM
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6ab83296b0e4..1e5f68704d7d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2003,6 +2003,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
.name = "vc4-hdmi-cpu-dai-component",
+ .legacy_dai_naming = 1,
};
static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai)
@@ -2854,7 +2855,7 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
+static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
@@ -2971,17 +2972,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
+ pm_runtime_enable(dev);
+
/*
- * We need to have the device powered up at this point to call
- * our reset hook and for the CEC init.
+ * We need to have the device powered up at this point to call
+ * our reset hook and for the CEC init.
*/
- ret = vc4_hdmi_runtime_resume(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
- goto err_put_ddc;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ goto err_disable_runtime_pm;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -3027,6 +3026,7 @@ err_destroy_conn:
err_destroy_encoder:
drm_encoder_cleanup(encoder);
pm_runtime_put_sync(dev);
+err_disable_runtime_pm:
pm_runtime_disable(dev);
err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index aff37d6f587c..9c1d31f63f85 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2089,6 +2089,7 @@ const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
return NULL;
}
+EXPORT_SYMBOL_GPL(hid_match_id);
static const struct hid_device_id hid_hiddev_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 68f9e9d207f4..71a9c258a20b 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -41,6 +41,9 @@ module_param(disable_tap_to_click, bool, 0644);
MODULE_PARM_DESC(disable_tap_to_click,
"Disable Tap-To-Click mode reporting for touchpads (only on the K400 currently).");
+/* Define a non-zero software ID to identify our own requests */
+#define LINUX_KERNEL_SW_ID 0x01
+
#define REPORT_ID_HIDPP_SHORT 0x10
#define REPORT_ID_HIDPP_LONG 0x11
#define REPORT_ID_HIDPP_VERY_LONG 0x12
@@ -71,21 +74,18 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
-#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
-#define HIDPP_QUIRK_HIDPP_WHEELS BIT(29)
-#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(30)
-#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(31)
+#define HIDPP_QUIRK_HIDPP_WHEELS BIT(26)
+#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(27)
+#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(28)
/* These are just aliases for now */
#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
#define HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
/* Convenience constant to check for any high-res support. */
-#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2121)
+#define HIDPP_CAPABILITY_HI_RES_SCROLL (HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL | \
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL)
#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
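With the three scroll variants expressed as capability bits instead of quirk bits, the convenience mask above lets a single test replace three per-device checks, e.g.:

/* One mask test covers HID++ 1.0 fast scroll and both 2.0 features. */
if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
	hi_res_scroll_enable(hidpp);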
@@ -96,6 +96,9 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
#define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5)
#define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6)
+#define HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL BIT(7)
+#define HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL BIT(8)
+#define HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL BIT(9)
#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -343,7 +346,7 @@ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
else
message->report_id = REPORT_ID_HIDPP_LONG;
message->fap.feature_index = feat_index;
- message->fap.funcindex_clientid = funcindex_clientid;
+ message->fap.funcindex_clientid = funcindex_clientid | LINUX_KERNEL_SW_ID;
memcpy(&message->fap.params, params, param_count);
ret = hidpp_send_message_sync(hidpp, message, response);
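In HID++ 2.0 the funcindex_clientid byte packs the function index into the high nibble and a software ID into the low nibble; that is why the command constants below lose their trailing 0x1 and the kernel's own ID is OR'd in at send time. A small worked example:

/* CMD_ROOT_GET_PROTOCOL_VERSION is 0x10: function index 1 in the high
 * nibble, low nibble left clear for the software ID.
 */
u8 byte = CMD_ROOT_GET_PROTOCOL_VERSION | LINUX_KERNEL_SW_ID;
/* byte == 0x11: function index 1, software ID 1 (this kernel) */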
@@ -856,8 +859,8 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
#define HIDPP_PAGE_ROOT 0x0000
#define HIDPP_PAGE_ROOT_IDX 0x00
-#define CMD_ROOT_GET_FEATURE 0x01
-#define CMD_ROOT_GET_PROTOCOL_VERSION 0x11
+#define CMD_ROOT_GET_FEATURE 0x00
+#define CMD_ROOT_GET_PROTOCOL_VERSION 0x10
static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
u8 *feature_index, u8 *feature_type)
@@ -934,9 +937,9 @@ print_version:
#define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005
-#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x01
-#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x11
-#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x21
+#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x00
+#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x10
+#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x20
static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp,
u8 feature_index, u8 *nameLength)
@@ -1966,8 +1969,8 @@ static int hidpp_touchpad_fw_items_set(struct hidpp_device *hidpp,
#define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100
-#define CMD_TOUCHPAD_GET_RAW_INFO 0x01
-#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x21
+#define CMD_TOUCHPAD_GET_RAW_INFO 0x00
+#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x20
#define EVENT_TOUCHPAD_RAW_XY 0x00
@@ -3415,14 +3418,14 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
int ret;
u8 multiplier = 1;
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+ if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) {
ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
if (ret == 0)
ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
- } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+ } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL) {
ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
&multiplier);
- } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */ {
+ } else /* if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL) */ {
ret = hidpp10_enable_scrolling_acceleration(hidpp);
multiplier = 8;
}
@@ -3437,6 +3440,49 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
return 0;
}
+static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
+{
+ int ret;
+ unsigned long capabilities;
+
+ capabilities = hidpp->capabilities;
+
+ if (hidpp->protocol_major >= 2) {
+ u8 feature_index;
+ u8 feature_type;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scroll wheel\n");
+ return 0;
+ }
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+ &feature_index, &feature_type);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n");
+ }
+ } else {
+ struct hidpp_report response;
+
+ ret = hidpp_send_rap_command_sync(hidpp,
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ HIDPP_ENABLE_FAST_SCROLL,
+ NULL, 0, &response);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n");
+ }
+ }
+
+ if (hidpp->capabilities == capabilities)
+ hid_dbg(hidpp->hid_dev, "Did not detect HID++ hi-res scrolling hardware support\n");
+ return 0;
+}
+
/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -3691,8 +3737,9 @@ static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
* cases we must return early (falling back to default behaviour) to
* avoid a crash in hidpp_scroll_counter_handle_scroll.
*/
- if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
- || hidpp->input == NULL || counter->wheel_multiplier == 0)
+ if (!(hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
+ || value == 0 || hidpp->input == NULL
+ || counter->wheel_multiplier == 0)
return 0;
hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value);
@@ -3924,6 +3971,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
}
hidpp_initialize_battery(hidpp);
+ hidpp_initialize_hires_scroll(hidpp);
/* forward current battery state */
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3943,7 +3991,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
hi_res_scroll_enable(hidpp);
if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
@@ -3959,8 +4007,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
hidpp_populate_input(hidpp, input);
ret = input_register_device(input);
- if (ret)
+ if (ret) {
input_free_device(input);
+ return;
+ }
hidpp->delayed_input = input;
}
@@ -4219,6 +4269,21 @@ static void hidpp_remove(struct hid_device *hdev)
mutex_destroy(&hidpp->send_mutex);
}
+static const struct hid_device_id unhandled_hidpp_devices[] = {
+ /* Logitech Harmony Adapter for PS3, handled in hid-sony */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ /* Handled in hid-generic */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD) },
+ {}
+};
+
+static bool hidpp_match(struct hid_device *hdev,
+ bool ignore_special_driver)
+{
+ /* Refuse to handle devices handled by other HID drivers */
+ return !hid_match_id(hdev, unhandled_hidpp_devices);
+}
+
#define LDJ_DEVICE(product) \
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
USB_VENDOR_ID_LOGITECH, (product))
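A .match hook runs before probe and lets a driver decline a device, leaving it to hid-generic or another driver. A hedged sketch of the shape hid core expects (the device ID below is hypothetical):

static const struct hid_device_id example_blacklist[] = {
	/* Hypothetical ID of a device another driver should own. */
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xbeef) },
	{}
};

static bool example_match(struct hid_device *hdev, bool ignore_special_driver)
{
	/* hid_match_id() returns the matching table entry or NULL. */
	return !hid_match_id(hdev, example_blacklist);
}

static struct hid_driver example_driver = {
	.name	= "example-hid",
	.match	= example_match,
	/* .id_table, .probe, .remove, ... */
};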
@@ -4239,42 +4304,9 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
- { /* Mouse Logitech Anywhere MX */
- LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech Cube */
- LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M335 */
- LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M515 */
- LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
LDJ_DEVICE(0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
- | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M705 (firmware RQM17) */
- LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech M705 (firmware RQM67) */
- LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M720 */
- LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2 */
- LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4072), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2S */
- LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master */
- LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master 2S */
- LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master 3 */
- LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech Performance MX */
- LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
{ /* Keyboard logitech K400 */
LDJ_DEVICE(0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
@@ -4335,18 +4367,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
- { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
- { /* MX Master mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* MX Ergo trackball over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* MX Master 3 mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+
+ { /* And try to enable HID++ for all the Logitech Bluetooth devices */
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_ANY, USB_VENDOR_ID_LOGITECH, HID_ANY_ID) },
{}
};
@@ -4360,6 +4383,7 @@ static const struct hid_usage_id hidpp_usages[] = {
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
+ .match = hidpp_match,
.report_fixup = hidpp_report_fixup,
.probe = hidpp_probe,
.remove = hidpp_remove,
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
index e46330b2e561..87637f813de2 100644
--- a/drivers/hid/surface-hid/surface_hid_core.c
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -19,12 +19,30 @@
#include "surface_hid_core.h"
+/* -- Utility functions. ---------------------------------------------------- */
+
+static bool surface_hid_is_hot_removed(struct surface_hid_device *shid)
+{
+ /*
+ * Non-ssam client devices, i.e. platform client devices, cannot be
+ * hot-removed.
+ */
+ if (!is_ssam_device(shid->dev))
+ return false;
+
+ return ssam_device_is_hot_removed(to_ssam_device(shid->dev));
+}
+
+
/* -- Device descriptor access. --------------------------------------------- */
static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
{
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
(u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
if (status)
@@ -61,6 +79,9 @@ static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
{
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
(u8 *)&shid->attrs, sizeof(shid->attrs));
if (status)
@@ -88,9 +109,18 @@ static int surface_hid_start(struct hid_device *hid)
static void surface_hid_stop(struct hid_device *hid)
{
struct surface_hid_device *shid = hid->driver_data;
+ bool hot_removed;
+
+ /*
+ * Communication may fail for devices that have been hot-removed. This
+ * also includes unregistration of HID events, so we need to check this
+	 * here. Only if the device has not been marked as hot-removed can we
+	 * safely disable events.
+ */
+ hot_removed = surface_hid_is_hot_removed(shid);
/* Note: This call will log errors for us, so ignore them here. */
- ssam_notifier_unregister(shid->ctrl, &shid->notif);
+ __ssam_notifier_unregister(shid->ctrl, &shid->notif, !hot_removed);
}
static int surface_hid_open(struct hid_device *hid)
@@ -109,6 +139,9 @@ static int surface_hid_parse(struct hid_device *hid)
u8 *buf;
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -126,6 +159,9 @@ static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportn
{
struct surface_hid_device *shid = hid->driver_data;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
return shid->ops.output_report(shid, reportnum, buf, len);
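The guard is applied uniformly: every path that would talk to the device checks the hot-removed state first and fails fast. A condensed sketch of the pattern (the helper name is from this patch; the surrounding function is illustrative):

static int example_get_hid_descriptor(struct surface_hid_device *shid)
{
	/* The device is gone; any transport access would only time out. */
	if (surface_hid_is_hot_removed(shid))
		return -ENODEV;

	return shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
					(u8 *)&shid->hid_desc,
					sizeof(shid->hid_desc));
}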
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6218bbf6863a..eca7afd366d6 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -171,6 +171,14 @@ int vmbus_connect(void)
goto cleanup;
}
+ vmbus_connection.rescind_work_queue =
+ create_workqueue("hv_vmbus_rescind");
+ if (!vmbus_connection.rescind_work_queue) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ vmbus_connection.ignore_any_offer_msg = false;
+
vmbus_connection.handle_primary_chan_wq =
create_workqueue("hv_pri_chan");
if (!vmbus_connection.handle_primary_chan_wq) {
@@ -357,6 +365,9 @@ void vmbus_disconnect(void)
if (vmbus_connection.handle_primary_chan_wq)
destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+ if (vmbus_connection.rescind_work_queue)
+ destroy_workqueue(vmbus_connection.rescind_work_queue);
+
if (vmbus_connection.work_queue)
destroy_workqueue(vmbus_connection.work_queue);
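Giving rescind messages their own workqueue means they can be queued and drained independently of the general work_queue, preserving the offer-before-rescind ordering noted in vmbus_on_msg_dpc() further below. A minimal lifecycle sketch with illustrative names:

#include <linux/workqueue.h>

static struct workqueue_struct *rescind_wq;

static int example_connect(void)
{
	rescind_wq = create_workqueue("hv_vmbus_rescind");
	return rescind_wq ? 0 : -ENOMEM;
}

static void example_on_rescind(struct work_struct *work)
{
	/* Handle CHANNELMSG_RESCIND_CHANNELOFFER on the dedicated queue. */
}

static void example_disconnect(void)
{
	if (rescind_wq)
		destroy_workqueue(rescind_wq);	/* flushes pending work */
}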
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 91e8a72eee14..fdf6decacf06 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -248,7 +249,7 @@ struct dm_capabilities_resp_msg {
* num_committed: Committed memory in pages.
* page_file_size: The accumulated size of all page files
* in the system in pages.
- * zero_free: The nunber of zero and free pages.
+ * zero_free: The number of zero and free pages.
* page_file_writes: The writes to the page file in pages.
* io_diff: An indicator of file cache efficiency or page file activity,
* calculated as File Cache Page Fault Count - Page Read Count.
@@ -567,6 +568,11 @@ struct hv_dynmem_device {
__u32 version;
struct page_reporting_dev_info pr_dev_info;
+
+ /*
+	 * Maximum number of pages that can be hot-added
+ */
+ __u64 max_dynamic_page_count;
};
static struct hv_dynmem_device dm_device;
@@ -1078,6 +1084,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
pr_info("Max. dynamic memory size: %llu MB\n",
(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
+ dm->max_dynamic_page_count = *max_page_count;
}
break;
@@ -1117,6 +1124,19 @@ static unsigned long compute_balloon_floor(void)
}
/*
+ * Compute total committed memory pages
+ */
+
+static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
+{
+ return vm_memory_committed() +
+ dm->num_pages_ballooned +
+ (dm->num_pages_added > dm->num_pages_onlined ?
+ dm->num_pages_added - dm->num_pages_onlined : 0) +
+ compute_balloon_floor();
+}
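The total folds four terms together; a worked example with purely illustrative numbers makes the clamp on the hot-add term visible:

/*
 * Illustrative numbers only:
 *   vm_memory_committed()                 = 100000 pages
 *   num_pages_ballooned                   =   5000 pages
 *   num_pages_added - num_pages_onlined   =   2000 pages (clamped at 0)
 *   compute_balloon_floor()               =  65536 pages
 *   total pages committed                 = 172536 pages
 */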
+
+/*
* Post our status as it relates memory pressure to the
* host. Host expects the guests to post this status
* periodically at 1 second intervals.
@@ -1157,11 +1177,7 @@ static void post_status(struct hv_dynmem_device *dm)
* asking us to balloon them out.
*/
num_pages_avail = si_mem_available();
- num_pages_committed = vm_memory_committed() +
- dm->num_pages_ballooned +
- (dm->num_pages_added > dm->num_pages_onlined ?
- dm->num_pages_added - dm->num_pages_onlined : 0) +
- compute_balloon_floor();
+ num_pages_committed = get_pages_committed(dm);
trace_balloon_status(num_pages_avail, num_pages_committed,
vm_memory_committed(), dm->num_pages_ballooned,
@@ -1807,6 +1823,109 @@ out:
return ret;
}
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * hv_balloon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the hv-balloon statistics that can be accessed via debugfs.
+ *
+ * Return: zero on success or an error code.
+ */
+static int hv_balloon_debug_show(struct seq_file *f, void *offset)
+{
+ struct hv_dynmem_device *dm = f->private;
+ char *sname;
+
+ seq_printf(f, "%-22s: %u.%u\n", "host_version",
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
+
+ seq_printf(f, "%-22s:", "capabilities");
+ if (ballooning_enabled())
+ seq_puts(f, " enabled");
+
+ if (hot_add_enabled())
+ seq_puts(f, " hot_add");
+
+ seq_puts(f, "\n");
+
+ seq_printf(f, "%-22s: %u", "state", dm->state);
+ switch (dm->state) {
+ case DM_INITIALIZING:
+ sname = "Initializing";
+ break;
+ case DM_INITIALIZED:
+ sname = "Initialized";
+ break;
+ case DM_BALLOON_UP:
+ sname = "Balloon Up";
+ break;
+ case DM_BALLOON_DOWN:
+ sname = "Balloon Down";
+ break;
+ case DM_HOT_ADD:
+ sname = "Hot Add";
+ break;
+ case DM_INIT_ERROR:
+ sname = "Error";
+ break;
+ default:
+ sname = "Unknown";
+ }
+ seq_printf(f, " (%s)\n", sname);
+
+ /* HV Page Size */
+ seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
+
+ /* Pages added with hot_add */
+ seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added);
+
+ /* pages that are "onlined"/used from pages_added */
+ seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined);
+
+ /* pages we have given back to host */
+ seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
+
+ seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
+ get_pages_committed(dm));
+
+ seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
+ dm->max_dynamic_page_count);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
+
+static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+ debugfs_create_file("hv-balloon", 0444, NULL, b,
+ &hv_balloon_debug_fops);
+}
+
+static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+ debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+}
+
+#else
+
+static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+}
+
+static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
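DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug) generates the open handler and file_operations around the _show routine; roughly (simplified from include/linux/seq_file.h):

static int hv_balloon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, hv_balloon_debug_show, inode->i_private);
}

static const struct file_operations hv_balloon_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = hv_balloon_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};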
static int balloon_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1854,6 +1973,8 @@ static int balloon_probe(struct hv_device *dev,
goto probe_error;
}
+ hv_balloon_debugfs_init(&dm_device);
+
return 0;
probe_error:
@@ -1879,6 +2000,8 @@ static int balloon_remove(struct hv_device *dev)
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+ hv_balloon_debugfs_exit(dm);
+
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4f5b824b16cf..dc673edf053c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -261,6 +261,13 @@ struct vmbus_connection {
struct workqueue_struct *work_queue;
struct workqueue_struct *handle_primary_chan_wq;
struct workqueue_struct *handle_sub_chan_wq;
+ struct workqueue_struct *rescind_work_queue;
+
+ /*
+ * On suspension of the vmbus, the accumulated offer messages
+ * must be dropped.
+ */
+ bool ignore_any_offer_msg;
/*
* The number of sub-channels and hv_sock channels that should be
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 547ae334e5cd..23c680d1a0f5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1160,7 +1160,9 @@ void vmbus_on_msg_dpc(unsigned long data)
* work queue: the RESCIND handler can not start to
* run before the OFFER handler finishes.
*/
- schedule_work(&ctx->work);
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
+ queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
@@ -1186,6 +1188,8 @@ void vmbus_on_msg_dpc(unsigned long data)
* to the CPUs which will execute the offer & rescind
* works by the time these works will start execution.
*/
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
atomic_inc(&vmbus_connection.offer_in_progress);
fallthrough;
@@ -2446,15 +2450,20 @@ acpi_walk_err:
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
+ struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
+ hv_context.cpu_context, VMBUS_CONNECT_CPU);
struct vmbus_channel *channel, *sc;
- while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
- /*
- * We wait here until the completion of any channel
- * offers that are currently in progress.
- */
- usleep_range(1000, 2000);
- }
+ tasklet_disable(&hv_cpu->msg_dpc);
+ vmbus_connection.ignore_any_offer_msg = true;
+	/* tasklet_enable() provides the memory barrier we need here */
+ tasklet_enable(&hv_cpu->msg_dpc);
+
+ /* Drain all the workqueues as we are in suspend */
+ drain_workqueue(vmbus_connection.rescind_work_queue);
+ drain_workqueue(vmbus_connection.work_queue);
+ drain_workqueue(vmbus_connection.handle_primary_chan_wq);
+ drain_workqueue(vmbus_connection.handle_sub_chan_wq);
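The disable/enable pair around the flag write is used purely as a barrier: tasklet_disable() waits for a running message DPC to finish, so once it returns, any later DPC run must see ignore_any_offer_msg == true and the drains cannot race with newly queued offer or rescind work. The idiom in isolation, with illustrative names:

/* Sketch: publish a flag to a tasklet, then drain dependent work. */
static void example_quiesce(struct tasklet_struct *dpc,
			    struct workqueue_struct *wq, bool *ignore_flag)
{
	tasklet_disable(dpc);	/* waits out any in-flight DPC run */
	*ignore_flag = true;
	tasklet_enable(dpc);	/* pairs as the memory barrier */

	drain_workqueue(wq);	/* only pre-flag work can remain */
}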
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
@@ -2531,6 +2540,8 @@ static int vmbus_bus_resume(struct device *dev)
size_t msgsize;
int ret;
+ vmbus_connection.ignore_any_offer_msg = false;
+
/*
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 03d07da8c2dc..221de01a327a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -2321,7 +2321,7 @@ static const char *lm90_detect_nuvoton(struct i2c_client *client, int chip_id,
const char *name = NULL;
if (config2 < 0)
- return ERR_PTR(-ENODEV);
+ return NULL;
if (address == 0x4c && !(config1 & 0x2a) && !(config2 & 0xf8)) {
if (chip_id == 0x01 && convrate <= 0x09) {
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 446964cbae4c..da9ec6983e13 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -1480,7 +1480,7 @@ static int nct6775_update_pwm_limits(struct device *dev)
return 0;
}
-static struct nct6775_data *nct6775_update_device(struct device *dev)
+struct nct6775_data *nct6775_update_device(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
int i, j, err = 0;
@@ -1615,6 +1615,7 @@ out:
mutex_unlock(&data->update_lock);
return err ? ERR_PTR(err) : data;
}
+EXPORT_SYMBOL_GPL(nct6775_update_device);
/*
* Sysfs callback functions
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index ab30437221ce..41c97cfacfb8 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -359,7 +359,7 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
{
int err;
u16 tmp;
- struct nct6775_data *data = dev_get_drvdata(dev);
+ struct nct6775_data *data = nct6775_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h
index 93f708148e65..be41848c3cd2 100644
--- a/drivers/hwmon/nct6775.h
+++ b/drivers/hwmon/nct6775.h
@@ -196,6 +196,8 @@ static inline int nct6775_write_value(struct nct6775_data *data, u16 reg, u16 va
return regmap_write(data->regmap, reg, value);
}
+struct nct6775_data *nct6775_update_device(struct device *dev);
+
bool nct6775_reg_is_word_sized(struct nct6775_data *data, u16 reg);
int nct6775_probe(struct device *dev, struct nct6775_data *data,
const struct regmap_config *regmapcfg);
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 33eeff94fc2a..1fb3a2550e29 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -94,11 +94,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
* the module SYSSTATUS register
*/
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
goto runtime_err;
- }
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 364710966665..80ea45b3a815 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -19,6 +19,11 @@
#define QCOM_MUTEX_APPS_PROC_ID 1
#define QCOM_MUTEX_NUM_LOCKS 32
+struct qcom_hwspinlock_of_data {
+ u32 offset;
+ u32 stride;
+};
+
static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
{
struct regmap_field *field = lock->priv;
@@ -63,9 +68,20 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.unlock = qcom_hwspinlock_unlock,
};
+static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
+ .offset = 0x4,
+ .stride = 0x4,
+};
+
+/* All modern platforms have offset 0 and a stride of 4k */
+static const struct qcom_hwspinlock_of_data of_tcsr_mutex = {
+ .offset = 0,
+ .stride = 0x1000,
+};
+
static const struct of_device_id qcom_hwspinlock_of_match[] = {
- { .compatible = "qcom,sfpb-mutex" },
- { .compatible = "qcom,tcsr-mutex" },
+ { .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex },
+ { .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
@@ -112,12 +128,14 @@ static const struct regmap_config tcsr_mutex_config = {
static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
u32 *offset, u32 *stride)
{
+ const struct qcom_hwspinlock_of_data *data;
struct device *dev = &pdev->dev;
void __iomem *base;
- /* All modern platform has offset 0 and stride of 4k */
- *offset = 0;
- *stride = 0x1000;
+ data = of_device_get_match_data(dev);
+
+ *offset = data->offset;
+ *stride = data->stride;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
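This is the standard OF match-data pattern: the per-SoC layout lives in .data on the of_device_id table, and probe recovers it with of_device_get_match_data() instead of hard-coding values. A minimal sketch (the compatible string is hypothetical):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_of_data {
	u32 offset;
	u32 stride;
};

static const struct example_of_data example_variant = {
	.offset = 0,
	.stride = 0x1000,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-mutex", .data = &example_variant },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_of_data *data =
			of_device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;	/* compatible without match data */

	/* ... use data->offset and data->stride ... */
	return 0;
}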
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index cf249ecad5a5..d39660a3e50c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -98,7 +98,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -106,7 +106,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -130,7 +130,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -138,7 +138,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index a7bfea31f7d8..4b21bb79f168 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -547,14 +547,14 @@
#define etm4x_read32(csa, offset) \
({ \
u32 __val = etm4x_relaxed_read32((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
#define etm4x_read64(csa, offset) \
({ \
u64 __val = etm4x_relaxed_read64((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
@@ -578,13 +578,13 @@
#define etm4x_write32(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write32((csa), (val), (offset)); \
} while (0)
#define etm4x_write64(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write64((csa), (val), (offset)); \
} while (0)
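__iormb()/__iowmb() are arm64-private names; __io_ar()/__io_bw() are the asm-generic/io.h spellings of the same read-acquire and write-before barriers, which keeps this header buildable on other architectures. A sketch of how the non-relaxed accessors are composed, mirroring the asm-generic readl()/writel() construction:

static inline u32 example_read32(const void __iomem *addr)
{
	u32 val = readl_relaxed(addr);	/* no ordering by itself */

	__io_ar(val);	/* order this read before later accesses */
	return val;
}

static inline void example_write32(void __iomem *addr, u32 val)
{
	__io_bw();	/* order earlier writes before this one */
	writel_relaxed(val, addr);
}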
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e95d6bc34070..7284206b278b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -108,6 +108,7 @@ config I2C_HIX5HD2
config I2C_I801
tristate "Intel 82801 (ICH/PCH)"
depends on PCI
+ select P2SB if X86
select CHECK_SIGNATURE if X86 && DMI
select I2C_SMBUS
help
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 354cf7e45c4a..50e7f3f670b6 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -447,7 +447,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
mutex_unlock(&idev->isr_mutex);
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &altr_i2c_algo;
idev->adapter.dev.parent = &pdev->dev;
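strscpy() is preferred here because, unlike strlcpy(), it never reads the source past the destination size and returns -E2BIG on truncation rather than the full source length. A hedged usage sketch:

#include <linux/string.h>

static void example_set_adapter_name(char *dst, size_t size, const char *src)
{
	/* Returns the number of bytes copied, or -E2BIG if truncated. */
	if (strscpy(dst, src, size) == -E2BIG)
		pr_debug("adapter name truncated to %zu bytes\n", size - 1);
}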
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 771e53d3d197..185dedfebbac 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -1022,7 +1022,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
bus->adap.algo = &aspeed_i2c_algo;
bus->adap.dev.parent = &pdev->dev;
bus->adap.dev.of_node = pdev->dev.of_node;
- strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
+ strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
i2c_set_adapdata(&bus->adap, bus);
bus->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 22aed922552b..99bd24d0e6a5 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -321,7 +321,7 @@ i2c_au1550_probe(struct platform_device *pdev)
priv->adap.algo = &au1550_algo;
priv->adap.algo_data = priv;
priv->adap.dev.parent = &pdev->dev;
- strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
/* Now, set up the PSC for SMBus PIO mode. */
i2c_au1550_setup(priv);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 5294b73beca8..bdf3b50de8ad 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -783,7 +783,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
}
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &axxia_i2c_algo;
idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info;
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 16bf41f1f086..f3e369f0fd40 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -839,7 +839,7 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
adap->algo = &bcm_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 2ae187e2b642..69383be47905 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -674,7 +674,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
adap->algo = &brcmstb_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index f8639a4457d2..d97c61eec95c 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -245,7 +245,7 @@ static int cbus_i2c_probe(struct platform_device *pdev)
adapter->nr = pdev->id;
adapter->timeout = HZ;
adapter->algo = &cbus_i2c_algo;
- strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
+ strscpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
spin_lock_init(&chost->lock);
chost->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index de15f09c9b47..190abdc46dd3 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -404,7 +404,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
adap->adapter.class = I2C_CLASS_HWMON;
adap->adapter.algo = &cht_wc_i2c_adap_algo;
adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
- strlcpy(adap->adapter.name, "PMIC I2C Adapter",
+ strscpy(adap->adapter.name, "PMIC I2C Adapter",
sizeof(adap->adapter.name));
adap->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 892213d51f43..4e787dc709f9 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -267,7 +267,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->dev = dev;
bus->adap.owner = THIS_MODULE;
- strlcpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
+ strscpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
bus->adap.algo = &ec_i2c_algorithm;
bus->adap.algo_data = bus;
bus->adap.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 9e09db31a937..471c47db546b 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -845,7 +845,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
adap->timeout = DAVINCI_I2C_TIMEOUT;
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 60c838c7c454..50925d97fa42 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -322,7 +322,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- strlcpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
+ strscpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &dc_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 321b2770feab..4914bfbee2a9 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,7 +773,7 @@ static int pch_i2c_probe(struct pci_dev *pdev,
pch_adap->owner = THIS_MODULE;
pch_adap->class = I2C_CLASS_HWMON;
- strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
+ strscpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
pch_adap->algo = &pch_algorithm;
pch_adap->algo_data = &adap_info->pch_data[i];
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index bdff0e6345d9..f2e537b137b2 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -371,7 +371,7 @@ static int em_i2c_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- strlcpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
priv->sclk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(priv->sclk))
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b812d1090c0f..4a6260d04db2 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -802,7 +802,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-frequency", &i2c->op_clock))
i2c->op_clock = I2C_MAX_STANDARD_MODE_FREQ;
- strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &exynos5_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 7a048abbf92b..b1985c1667e1 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -436,7 +436,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
if (np)
- strlcpy(adap->name, dev_name(dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(dev), sizeof(adap->name));
else
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index a2add128d084..4374a8677271 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -402,7 +402,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON;
- strlcpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
adap->algo = &highlander_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 61ae58f57047..0e34cbaca22d 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -423,7 +423,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->clk);
- strlcpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
priv->dev = &pdev->dev;
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &hix5hd2_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 9e5b87e107ba..a176296f4fff 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -112,6 +112,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/platform_data/itco_wdt.h>
+#include <linux/platform_data/x86/p2sb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
@@ -141,7 +142,6 @@
#define TCOBASE 0x050
#define TCOCTL 0x054
-#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
#define SBREG_SMBCTRL_DNV 0xcf000c
@@ -1116,7 +1116,7 @@ static void dmi_check_onboard_device(u8 type, const char *name,
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dmi_devices[i].i2c_addr;
- strlcpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
+ strscpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
i2c_new_client_device(adap, &info);
break;
}
@@ -1267,7 +1267,7 @@ static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strlcpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
+ strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
i2c_new_client_device(&priv->adapter, &info);
}
@@ -1485,45 +1485,24 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
.version = 4,
};
struct resource *res;
- unsigned int devfn;
- u64 base64_addr;
- u32 base_addr;
- u8 hidden;
+ int ret;
/*
* We must access the NO_REBOOT bit over the Primary to Sideband
- * bridge (P2SB). The BIOS prevents the P2SB device from being
- * enumerated by the PCI subsystem, so we need to unhide/hide it
- * to lookup the P2SB BAR.
+ * (P2SB) bridge.
*/
- pci_lock_rescan_remove();
-
- devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
-
- /* Unhide the P2SB device, if it is hidden */
- pci_bus_read_config_byte(pci_dev->bus, devfn, 0xe1, &hidden);
- if (hidden)
- pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0);
-
- pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr);
- base64_addr = base_addr & 0xfffffff0;
-
- pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr);
- base64_addr |= (u64)base_addr << 32;
-
- /* Hide the P2SB device, if it was hidden before */
- if (hidden)
- pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
- pci_unlock_rescan_remove();
res = &tco_res[1];
+ ret = p2sb_bar(pci_dev->bus, 0, res);
+ if (ret)
+ return ERR_PTR(ret);
+
if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ res->start += SBREG_SMBCTRL_DNV;
else
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ res->start += SBREG_SMBCTRL;
res->end = res->start + 3;
- res->flags = IORESOURCE_MEM;
return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
tco_res, 2, &pldata, sizeof(pldata));
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 9f71daf6db64..eeb80e34f9ad 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -738,7 +738,7 @@ static int iic_probe(struct platform_device *ofdev)
adap = &dev->adap;
adap->dev.parent = &ofdev->dev;
adap->dev.of_node = of_node_get(np);
- strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
+ strscpy(adap->name, "IBM IIC", sizeof(adap->name));
i2c_set_adapdata(adap, dev);
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &iic_algo;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 5dae7cab7260..febcb6f01d4d 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -141,7 +141,7 @@ static int icy_probe(struct zorro_dev *z,
i2c->adapter.owner = THIS_MODULE;
/* i2c->adapter.algo assigned by i2c_pcf_add_bus() */
i2c->adapter.algo_data = algo_data;
- strlcpy(i2c->adapter.name, "ICY I2C Zorro adapter",
+ strscpy(i2c->adapter.name, "ICY I2C Zorro adapter",
sizeof(i2c->adapter.name));
if (!devm_request_mem_region(&z->dev,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 8b9ba055c418..b51ab3cad2b1 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -558,7 +558,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
lpi2c_imx->adapter.dev.parent = &pdev->dev;
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
- strlcpy(lpi2c_imx->adapter.name, pdev->name,
+ strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 78fb1a4274a6..e47fa3465671 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1572,9 +1572,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
hrtimer_cancel(&i2c_imx->slave_timer);
@@ -1585,17 +1583,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- /* setup chip registers to defaults */
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ if (ret == 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ clk_disable(i2c_imx->clk);
+ }
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
- clk_disable_unprepare(i2c_imx->clk);
+
+ clk_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index 5bbb7f0d7852..cf857cf22507 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -303,6 +303,7 @@ static int kempld_i2c_probe(struct platform_device *pdev)
i2c->dev = &pdev->dev;
i2c->adap = kempld_i2c_adapter;
i2c->adap.dev.parent = i2c->dev;
+ ACPI_COMPANION_SET(&i2c->adap.dev, ACPI_COMPANION(&pdev->dev));
i2c_set_adapdata(&i2c->adap, i2c);
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 4e30c5267142..8fff6fbb7065 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -417,7 +417,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo = &i2c_lpc2k_algorithm;
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 61cc5b2462c6..889eff06b78f 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -502,7 +502,7 @@ static int meson_i2c_probe(struct platform_device *pdev)
return ret;
}
- strlcpy(i2c->adap.name, "Meson I2C adapter",
+ strscpy(i2c->adap.name, "Meson I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &meson_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 6df0f1c33278..4d7e9b25f018 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -206,7 +206,7 @@ static void mchp_corei2c_empty_rx(struct mchp_corei2c_dev *idev)
idev->msg_len--;
}
- if (idev->msg_len == 0) {
+ if (idev->msg_len <= 1) {
ctrl = readb(idev->base + CORE_I2C_CTRL);
ctrl &= ~CTRL_AA;
writeb(ctrl, idev->base + CORE_I2C_CTRL);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 6c698c10d3cd..81ac92bb4f6f 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -239,6 +239,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
u32 *real_clk)
{
+ struct fwnode_handle *fwnode = of_fwnode_handle(node);
const struct mpc_i2c_divider *div = NULL;
unsigned int pvr = mfspr(SPRN_PVR);
u32 divider;
@@ -246,12 +247,12 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
if (clock == MPC_I2C_CLOCK_LEGACY) {
/* see below - default fdr = 0x3f -> div = 2048 */
- *real_clk = mpc5xxx_get_bus_frequency(node) / 2048;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / 2048;
return -EINVAL;
}
/* Determine divider value */
- divider = mpc5xxx_get_bus_frequency(node) / clock;
+ divider = mpc5xxx_fwnode_get_bus_frequency(fwnode) / clock;
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
@@ -266,7 +267,7 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
break;
}
- *real_clk = mpc5xxx_get_bus_frequency(node) / div->divider;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / div->divider;
return (int)div->fdr;
}
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 8e6985354fd5..fc7bfd98156b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -229,6 +229,35 @@ static const u16 mt_i2c_regs_v2[] = {
[OFFSET_DCM_EN] = 0xf88,
};
+static const u16 mt_i2c_regs_v3[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_LTIMING] = 0x2c,
+ [OFFSET_HS] = 0x30,
+ [OFFSET_IO_CONFIG] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_SDA_TIMING] = 0x3c,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x44,
+ [OFFSET_CLOCK_DIV] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_MULTI_DMA] = 0x8c,
+ [OFFSET_SCL_MIS_COMP_POINT] = 0x90,
+ [OFFSET_SLAVE_ADDR] = 0x94,
+ [OFFSET_DEBUGSTAT] = 0xe4,
+ [OFFSET_DEBUGCTRL] = 0xe8,
+ [OFFSET_FIFO_STAT] = 0xf4,
+ [OFFSET_FIFO_THRESH] = 0xf8,
+ [OFFSET_DCM_EN] = 0xf88,
+};
+
struct mtk_i2c_compatible {
const struct i2c_adapter_quirks *quirks;
const u16 *regs;
@@ -442,6 +471,19 @@ static const struct mtk_i2c_compatible mt8186_compat = {
.max_dma_support = 36,
};
+static const struct mtk_i2c_compatible mt8188_compat = {
+ .regs = mt_i2c_regs_v3,
+ .pmic_i2c = 0,
+ .dcm = 0,
+ .auto_restart = 1,
+ .aux_len_reg = 1,
+ .timing_adjust = 1,
+ .dma_sync = 0,
+ .ltiming_adjust = 1,
+ .apdma_sync = 1,
+ .max_dma_support = 36,
+};
+
static const struct mtk_i2c_compatible mt8192_compat = {
.quirks = &mt8183_i2c_quirks,
.regs = mt_i2c_regs_v2,
@@ -465,6 +507,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
{ .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
{ .compatible = "mediatek,mt8186-i2c", .data = &mt8186_compat },
+ { .compatible = "mediatek,mt8188-i2c", .data = &mt8188_compat },
{ .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat },
{}
};
@@ -1389,7 +1432,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
speed_clk = I2C_MT65XX_CLK_MAIN;
}
- strlcpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
if (ret) {
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index cfe6de8175dd..20eda5738ac4 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -312,7 +312,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
i2c_set_adapdata(adap, i2c);
adap->dev.of_node = pdev->dev.of_node;
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 103a05ecc3d6..047dfef7a657 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -989,7 +989,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
if (IS_ERR(drv_data->reg_base))
return PTR_ERR(drv_data->reg_base);
- strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
+ strscpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
sizeof(drv_data->adapter.name));
init_waitqueue_head(&drv_data->waitq);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 68f67d084c63..5af5cffc444e 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -838,7 +838,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return err;
adap = &i2c->adapter;
- strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &mxs_i2c_algo;
adap->quirks = &mxs_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 6920c1b9a126..12e330cd7635 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -299,7 +299,7 @@ static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
i2c_set_adapdata(&i2cd->adapter, i2cd);
i2cd->adapter.owner = THIS_MODULE;
- strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ strscpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
sizeof(i2cd->adapter.name));
i2cd->adapter.algo = &gpu_i2c_algorithm;
i2cd->adapter.quirks = &gpu_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d4f6c6d60683..f9ae520aed22 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1488,7 +1488,7 @@ omap_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, omap);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->quirks = &omap_i2c_quirks;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 6eb0f50c5d28..9f773b4f5ed8 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -220,9 +220,9 @@ static int i2c_opal_probe(struct platform_device *pdev)
adapter->dev.of_node = of_node_get(pdev->dev.of_node);
pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
if (pname)
- strlcpy(adapter->name, pname, sizeof(adapter->name));
+ strscpy(adapter->name, pname, sizeof(adapter->name));
else
- strlcpy(adapter->name, "opal", sizeof(adapter->name));
+ strscpy(adapter->name, "opal", sizeof(adapter->name));
platform_set_drvdata(pdev, adapter);
rc = i2c_add_adapter(adapter);
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 231145c48728..0af86a542568 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -308,7 +308,7 @@ static void i2c_parport_attach(struct parport *port)
/* Fill the rest of the structure */
adapter->adapter.owner = THIS_MODULE;
adapter->adapter.class = I2C_CLASS_HWMON;
- strlcpy(adapter->adapter.name, "Parallel port adapter",
+ strscpy(adapter->adapter.name, "Parallel port adapter",
sizeof(adapter->adapter.name));
adapter->algo_data = parport_algo_data;
/* Slow down if we can't sense SCL */
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 690188a9ffff..b605b6e43cb9 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1403,7 +1403,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- strlcpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
i2c->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(i2c->clk)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 6ac179a373ff..84a77512614d 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -494,12 +494,12 @@ static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
{
if (tx_buf) {
dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE);
- i2c_put_dma_safe_msg_buf(tx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err);
}
if (rx_buf) {
dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE);
- i2c_put_dma_safe_msg_buf(rx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err);
}
}
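The third argument of i2c_put_dma_safe_msg_buf() records whether the transfer succeeded; with !gi2c->err, a bounce buffer used for a read is copied back into msg->buf only when data actually arrived. The get/put pairing in outline (a sketch, not the driver's exact flow):

static int example_xfer(struct i2c_msg *msg)
{
	int err = 0;
	u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);	/* may bounce msg->buf */

	if (!dma_buf)
		return -ENOMEM;

	/* ... map the buffer, run the DMA transfer, set err on failure ... */

	/* For I2C_M_RD messages this copies back to msg->buf only if !err. */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, !err);
	return err;
}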
@@ -563,6 +563,7 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
desc->callback_param = gi2c;
dmaengine_submit(desc);
+ *buf = dma_buf;
*dma_addr_p = addr;
return 0;
@@ -816,7 +817,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
- strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+ strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
ret = geni_icc_get(&gi2c->se, "qup-memory");
if (ret)
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 69e9f3ecf87d..2e153f2f71b6 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1878,7 +1878,7 @@ nodma:
qup->adap.dev.of_node = pdev->dev.of_node;
qup->is_last = true;
- strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
+ strscpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(qup->dev);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 6e7be9d9f504..cef82b205c26 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1076,7 +1076,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->bus_recovery_info = &rcar_i2c_bri;
adap->quirks = &rcar_i2c_quirks;
i2c_set_adapdata(adap, priv);
- strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
/* Init DMA */
sg_init_table(&priv->sg, 1);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index cded77e06670..ecba1dfc1278 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -448,7 +448,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
- strlcpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
+ strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 989040a73626..2e98e7793bba 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1240,7 +1240,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
/* use common interface to get I2C timing properties */
i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
- strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &rk3x_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index b49a1b170bb2..36dab9cd208c 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1076,7 +1076,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
else
s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c);
- strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &s3c24xx_i2c_algorithm;
i2c->adap.retries = 2;
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 79798fc7462a..6746aa46d96c 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -30,7 +30,7 @@ struct acpi_smbus_cmi {
u8 cap_info:1;
u8 cap_read:1;
u8 cap_write:1;
- const struct smbus_methods_t *methods;
+ struct smbus_methods_t *methods;
};
static const struct smbus_methods_t smbus_methods = {
@@ -361,6 +361,7 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
+ const struct acpi_device_id *id;
int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
@@ -368,7 +369,6 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
return -ENOMEM;
smbus_cmi->handle = device->handle;
- smbus_cmi->methods = device_get_match_data(&device->dev);
strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
device->driver_data = smbus_cmi;
@@ -376,6 +376,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->cap_read = 0;
smbus_cmi->cap_write = 0;
+ for (id = acpi_smbus_cmi_ids; id->id[0]; id++)
+ if (!strcmp(id->id, acpi_device_hid(device)))
+ smbus_cmi->methods =
+ (struct smbus_methods_t *) id->driver_data;
+
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
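[Editor's note: the i2c-scmi hunks above revert from device_get_match_data() to an open-coded walk of the driver's acpi_device_id table, matching on the device's HID and taking the method set from driver_data. A hedged sketch of that pattern — the table contents and names here are illustrative, not the driver's actual IDs:]

```c
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/string.h>

/* Illustrative table; the real driver's HIDs and methods differ. */
static const struct acpi_device_id example_ids[] = {
	{ "SMB0001", (kernel_ulong_t)&smbus_methods },
	{ "", 0 },
};

static void example_pick_methods(struct acpi_smbus_cmi *smbus_cmi,
				 struct acpi_device *device)
{
	const struct acpi_device_id *id;

	/* Stop at the empty-string sentinel entry. */
	for (id = example_ids; id->id[0]; id++)
		if (!strcmp(id->id, acpi_device_hid(device)))
			smbus_cmi->methods =
				(struct smbus_methods_t *)id->driver_data;
}
```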
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 72f024a0c363..29330ee64c9c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -940,7 +940,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->nr = dev->id;
adap->dev.of_node = dev->dev.of_node;
- strlcpy(adap->name, dev->name, sizeof(adap->name));
+ strscpy(adap->name, dev->name, sizeof(adap->name));
spin_lock_init(&pd->lock);
init_waitqueue_head(&pd->wait);
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 458c7bcf1d24..87701744752f 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -99,7 +99,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
pd->adap.algo_data = &pd->bit;
pd->adap.dev.parent = &dev->dev;
- strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
+ strscpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
pd->bit.data = pd;
pd->bit.setsda = simtec_i2c_setsda;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index b4050f5b6746..b0f0120793e1 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -239,7 +239,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
dev_err(&serio->dev, "TAOS EVM identification failed\n");
goto exit_close;
}
- strlcpy(adapter->name, name, sizeof(adapter->name));
+ strscpy(adapter->name, name, sizeof(adapter->name));
/* Turn echo off for better performance */
taos->state = TAOS_STATE_EOFF;
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec0c7cad4240..95139985b2d5 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -305,7 +305,7 @@ static int tegra_bpmp_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.owner = THIS_MODULE;
- strlcpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
+ strscpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
sizeof(i2c->adapter.name));
i2c->adapter.algo = &tegra_bpmp_i2c_algo;
i2c->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2941e42aa6a0..031c78ac42e6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1825,7 +1825,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (i2c_dev->hw->supports_bus_clear)
i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info;
- strlcpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
+ strscpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
sizeof(i2c_dev->adapter.name));
err = i2c_add_numbered_adapter(&i2c_dev->adapter);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index cb4666c54a23..d7b622891e52 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -564,7 +564,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_fi2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_fi2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index ee00a44bf4c7..e3ebae381f08 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -358,7 +358,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_i2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_i2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 8d980b1374a8..1ab419f8fa52 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -79,7 +79,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
writel(SCL | SDA, i2c->base + I2C_CONTROLS);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo_data = &i2c->algo;
i2c->adap.dev.parent = &dev->dev;
i2c->adap.dev.of_node = dev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 88f5aafdce5b..7d4bc8736079 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -413,7 +413,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adapter;
i2c_set_adapdata(adap, i2c_dev);
- strlcpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &wmt_i2c_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 10f35f942066..91007558bcb2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -933,7 +933,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->init_irq = i2c_dev_irq_from_resources(info->resources,
info->num_resources);
- strlcpy(client->name, info->type, sizeof(client->name));
+ strscpy(client->name, info->type, sizeof(client->name));
status = i2c_check_addr_validity(client->addr, client->flags);
if (status) {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 775332945ad0..8ba9b59a3c40 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -391,7 +391,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
unsigned short addr_list[2];
memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, name, I2C_NAME_SIZE);
+ strscpy(info.type, name, I2C_NAME_SIZE);
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
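[Editor's note on the strlcpy() -> strscpy() conversions running through the i2c hunks above: strlcpy() returns strlen(src), so it must read the whole source string — even past the destination size, and past the end of a source that is not NUL-terminated — and truncation can only be detected by comparing the result against the buffer size. strscpy() never reads more than it copies, always NUL-terminates (for a non-zero size), and reports truncation directly. A hedged sketch:]

```c
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_set_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);

	if (n == -E2BIG)	/* truncated: dst holds a NUL-terminated prefix */
		pr_warn("name truncated to \"%s\"\n", dst);

	/*
	 * strlcpy() would have returned strlen(src) here, forcing callers
	 * to compare against dst_size themselves and over-reading any
	 * source buffer that lacks a terminating NUL.
	 */
}
```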
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 33d3ce9c888e..aa36ac618e72 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -78,20 +78,21 @@ config INFINIBAND_VIRT_DMA
def_bool !HIGHMEM
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
-source "drivers/infiniband/hw/mthca/Kconfig"
-source "drivers/infiniband/hw/qib/Kconfig"
+source "drivers/infiniband/hw/bnxt_re/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
+source "drivers/infiniband/hw/erdma/Kconfig"
+source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/hns/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
+source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
-source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
-source "drivers/infiniband/hw/usnic/Kconfig"
-source "drivers/infiniband/hw/hns/Kconfig"
-source "drivers/infiniband/hw/bnxt_re/Kconfig"
-source "drivers/infiniband/hw/hfi1/Kconfig"
source "drivers/infiniband/hw/qedr/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
+source "drivers/infiniband/hw/usnic/Kconfig"
+source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/sw/siw/Kconfig"
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fabca5e51e3d..46d06678dfbe 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -11,6 +11,7 @@
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
+#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
@@ -20,6 +21,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
@@ -168,6 +170,9 @@ static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
+static struct rb_root id_table = RB_ROOT;
+/* Serialize operations of id_table tree */
+static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;
@@ -202,6 +207,11 @@ struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
}
}
+struct id_table_entry {
+ struct list_head id_list;
+ struct rb_node rb_node;
+};
+
struct cma_device {
struct list_head list;
struct ib_device *device;
@@ -420,11 +430,21 @@ static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
return hdr->ip_version >> 4;
}
-static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
+static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
+static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
+}
+
+static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
+}
+
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
struct in_device *in_dev = NULL;
@@ -445,6 +465,117 @@ static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
return (in_dev) ? 0 : -ENODEV;
}
+static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
+ struct id_table_entry *entry_b)
+{
+ struct rdma_id_private *id_priv = list_first_entry(
+ &entry_b->id_list, struct rdma_id_private, id_list_entry);
+ int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
+ struct sockaddr *sb = cma_dst_addr(id_priv);
+
+ if (ifindex_a != ifindex_b)
+ return (ifindex_a > ifindex_b) ? 1 : -1;
+
+ if (sa->sa_family != sb->sa_family)
+ return sa->sa_family - sb->sa_family;
+
+ if (sa->sa_family == AF_INET)
+ return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
+ (char *)&((struct sockaddr_in *)sb)->sin_addr,
+ sizeof(((struct sockaddr_in *)sa)->sin_addr));
+
+ return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+ &((struct sockaddr_in6 *)sb)->sin6_addr);
+}
+
+static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
+{
+ struct rb_node **new, *parent = NULL;
+ struct id_table_entry *this, *node;
+ unsigned long flags;
+ int result;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ new = &id_table.rb_node;
+ while (*new) {
+ this = container_of(*new, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(
+ node_id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(node_id_priv), this);
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else {
+ list_add_tail(&node_id_priv->id_list_entry,
+ &this->id_list);
+ kfree(node);
+ goto unlock;
+ }
+ }
+
+ INIT_LIST_HEAD(&node->id_list);
+ list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
+
+ rb_link_node(&node->rb_node, parent, new);
+ rb_insert_color(&node->rb_node, &id_table);
+
+unlock:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return 0;
+}
+
+static struct id_table_entry *
+node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
+{
+ struct rb_node *node = root->rb_node;
+ struct id_table_entry *data;
+ int result;
+
+ while (node) {
+ data = container_of(node, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(ifindex, sa, data);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return data;
+ }
+
+ return NULL;
+}
+
+static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
+{
+ struct id_table_entry *data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (list_empty(&id_priv->id_list_entry))
+ goto out;
+
+ data = node_from_ndev_ip(&id_table,
+ id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(id_priv));
+ if (!data)
+ goto out;
+
+ list_del_init(&id_priv->id_list_entry);
+ if (list_empty(&data->id_list)) {
+ rb_erase(&data->rb_node, &id_table);
+ kfree(data);
+ }
+out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+}
+
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev)
{
@@ -481,16 +612,6 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
-{
- return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
-}
-
-static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
-{
- return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
-}
-
static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
return id_priv->id.route.addr.src_addr.ss_family;
@@ -861,6 +982,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
INIT_LIST_HEAD(&id_priv->device_item);
+ INIT_LIST_HEAD(&id_priv->id_list_entry);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -1883,6 +2005,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
cma_cancel_operation(id_priv, state);
rdma_restrack_del(&id_priv->res);
+ cma_remove_id_from_tree(id_priv);
if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
@@ -3172,8 +3295,11 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
cma_id_get(id_priv);
if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
- else if (rdma_protocol_roce(id->device, id->port_num))
+ else if (rdma_protocol_roce(id->device, id->port_num)) {
ret = cma_resolve_iboe_route(id_priv);
+ if (!ret)
+ cma_add_id_to_tree(id_priv);
+ }
else if (rdma_protocol_iwarp(id->device, id->port_num))
ret = cma_resolve_iw_route(id_priv);
else
@@ -4922,10 +5048,87 @@ out:
return ret;
}
+static void cma_netevent_work_handler(struct work_struct *_work)
+{
+ struct rdma_id_private *id_priv =
+ container_of(_work, struct rdma_id_private, id.net_work);
+ struct rdma_cm_event event = {};
+
+ mutex_lock(&id_priv->handler_mutex);
+
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
+
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
+
+ if (cma_cm_event_handler(id_priv, &event)) {
+ __acquire(&id_priv->handler_mutex);
+ id_priv->cm_id.ib = NULL;
+ cma_id_put(id_priv);
+ destroy_id_handler_unlock(id_priv);
+ return;
+ }
+
+out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_id_put(id_priv);
+}
+
+static int cma_netevent_callback(struct notifier_block *self,
+ unsigned long event, void *ctx)
+{
+ struct id_table_entry *ips_node = NULL;
+ struct rdma_id_private *current_id;
+ struct neighbour *neigh = ctx;
+ unsigned long flags;
+
+ if (event != NETEVENT_NEIGH_UPDATE)
+ return NOTIFY_DONE;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (neigh->tbl->family == AF_INET6) {
+ struct sockaddr_in6 neigh_sock_6;
+
+ neigh_sock_6.sin6_family = AF_INET6;
+ neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_6);
+ } else if (neigh->tbl->family == AF_INET) {
+ struct sockaddr_in neigh_sock_4;
+
+ neigh_sock_4.sin_family = AF_INET;
+ neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_4);
+ } else
+ goto out;
+
+ if (!ips_node)
+ goto out;
+
+ list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
+ if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
+ neigh->ha, ETH_ALEN))
+ continue;
+ INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
+ cma_id_get(current_id);
+ queue_work(cma_wq, &current_id->id.net_work);
+ }
+out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return NOTIFY_DONE;
+}
+
static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
+static struct notifier_block cma_netevent_cb = {
+ .notifier_call = cma_netevent_callback
+};
+
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
@@ -5148,6 +5351,7 @@ static int __init cma_init(void)
ib_sa_register_client(&sa_client);
register_netdevice_notifier(&cma_nb);
+ register_netevent_notifier(&cma_netevent_cb);
ret = ib_register_client(&cma_client);
if (ret)
@@ -5162,6 +5366,7 @@ static int __init cma_init(void)
err_ib:
ib_unregister_client(&cma_client);
err:
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
@@ -5174,6 +5379,7 @@ static void __exit cma_cleanup(void)
{
cma_configfs_exit();
ib_unregister_client(&cma_client);
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
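[Editor's note: the cma.c changes above wire the RDMA CM into the kernel's netevent chain. Each RoCE connection that resolves a route is inserted into an rb-tree keyed by (bound ifindex, address family, destination address); a NETEVENT_NEIGH_UPDATE whose new MAC differs from the cached next-hop address queues RDMA_CM_EVENT_UNREACHABLE work for the affected IDs. A minimal hedged sketch of a netevent consumer, using only the registration API the patch itself calls:]

```c
#include <linux/notifier.h>
#include <linux/printk.h>
#include <net/netevent.h>
#include <net/neighbour.h>

static int example_netevent_cb(struct notifier_block *nb,
			       unsigned long event, void *ctx)
{
	struct neighbour *neigh = ctx;

	if (event != NETEVENT_NEIGH_UPDATE)
		return NOTIFY_DONE;

	/* cma.c looks (neigh->dev->ifindex, dst IP) up in its rb-tree here
	 * and schedules unreachable-event work for stale connections. */
	pr_debug("neigh update on ifindex %d\n", neigh->dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netevent_cb,
};

/* register_netevent_notifier(&example_nb) at init,
 * unregister_netevent_notifier(&example_nb) on teardown. */
```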
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 757a0ef79872..b7354c94cf1b 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -64,6 +64,7 @@ struct rdma_id_private {
struct list_head listen_item;
struct list_head listen_list;
};
+ struct list_head id_list_entry;
struct cma_device *cma_dev;
struct list_head mc_list;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 94d83b665a2f..29b1ab1d5f93 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -68,7 +68,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj,
* In exclusive access mode, we check that the counter is zero (nobody
* claimed this object) and we set it to -1. Releasing a shared access
* lock is done simply by decreasing the counter. As for exclusive
- * access locks, since only a single one of them is is allowed
+ * access locks, since only a single one of them is allowed
* concurrently, setting the counter to zero is enough for releasing
* this lock.
*/
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 68197e576433..e958c43dd28f 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -250,7 +250,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
/**
* is_upper_ndev_bond_master_filter - Check if a given netdevice
- * is bond master device of netdevice of the the RDMA device of port.
+ * is bond master device of netdevice of the RDMA device of port.
* @ib_dev: IB device to check
* @port: Port to consider for adding default GID
* @rdma_ndev: Pointer to rdma netdevice
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 4d98f931a13d..8367974b7998 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -274,33 +274,6 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return 1;
}
-static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
- u32 sg_cnt, enum dma_data_direction dir)
-{
- if (is_pci_p2pdma_page(sg_page(sg)))
- pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
- else
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
-}
-
-static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- int nents;
-
- if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
- if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
- return 0;
- nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
- sgt->orig_nents, dir);
- if (!nents)
- return -EIO;
- sgt->nents = nents;
- return 0;
- }
- return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
-}
-
/**
* rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
* @ctx: context to initialize
@@ -327,7 +300,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
};
int ret;
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
sg_cnt = sgt.nents;
@@ -366,7 +339,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
return ret;
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -414,12 +387,12 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
if (prot_sg_cnt) {
- ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
if (ret)
goto out_unmap_sg;
}
@@ -486,9 +459,9 @@ out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
if (prot_sgt.nents)
- rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -621,7 +594,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
break;
}
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
@@ -649,8 +622,8 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
kfree(ctx->reg);
if (prot_sg_cnt)
- rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
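[Editor's note: the rw.c simplification above is possible because the removed rdma_rw_map_sgtable()/rdma_rw_unmap_sg() helpers special-cased PCI peer-to-peer pages, and the core DMA-mapping path behind ib_dma_map_sgtable_attrs() now handles P2PDMA itself. Mapping therefore collapses to a symmetric pair; a hedged sketch of the resulting pattern:]

```c
#include <rdma/ib_verbs.h>

static int example_map_and_use(struct ib_device *dev, struct sg_table *sgt,
			       enum dma_data_direction dir)
{
	int ret = ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);

	if (ret)
		return ret;	/* P2PDMA pages are handled inside the core */

	/* ... post RDMA work requests using the sgt->nents mapped entries ... */

	ib_dma_unmap_sgtable_attrs(dev, sgt, dir, 0);
	return 0;
}
```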
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index fce80a4a5147..04c04e6d24c3 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
struct scatterlist *sg;
unsigned long start, end, cur = 0;
unsigned int nmap = 0;
+ long ret;
int i;
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ wait_fence:
* may not be up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
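[Editor's note: the umem_dmabuf hunk above also fixes a return-convention mismatch. ib_umem_dmabuf_map_pages() returns int, but dma_resv_wait_timeout() returns a long in the schedule_timeout() convention — negative on error, 0 on timeout, remaining jiffies on success — so returning it directly could leak a positive jiffies count to callers expecting 0 or -errno. A sketch of the normalization the patch applies:]

```c
#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/sched.h>

static int example_wait_kernel_fences(struct dma_resv *resv)
{
	long ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
					 false, MAX_SCHEDULE_TIMEOUT);

	if (ret < 0)
		return ret;		/* wait failed */
	if (ret == 0)
		return -ETIMEDOUT;	/* fences did not signal in time */
	return 0;			/* > 0: fences signaled, jiffies left */
}
```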
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index fba0b3be903e..6b3a88046125 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
+obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 79401e6c6aa9..785c37cae3c0 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -173,7 +173,7 @@ struct bnxt_re_dev {
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
- /* QP for for handling QP1 packets */
+ /* QP for handling QP1 packets */
struct bnxt_re_gsi_context gsi_ctx;
struct bnxt_re_stats stats;
atomic_t nq_alloc_cnt;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c16017f6e8db..14392c942f49 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN_V(1);
}
- skb_get(skb);
- rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
- skb_trim(skb, sizeof(*rpl5));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
- if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
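[Editor's note: the rewritten accept_cr() path above drops the skb_get()/skb_trim() reuse of the incoming request skb — which left stale payload bytes that then had to be cleared with memset_after() — in favour of a freshly sized, zero-filled reply. __skb_put_zero() extends the skb tail by len bytes, zeroes them, and returns the start of the new area. A hedged sketch with the CPL field setup elided:]

```c
#include <linux/skbuff.h>

/* Illustrative only; rpl_len and the reply layout are placeholders. */
static struct sk_buff *example_build_rpl(size_t rpl_len, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(rpl_len, gfp);
	void *rpl;

	if (!skb)
		return NULL;

	/* Tail grows by rpl_len, already zeroed: no stale data to scrub. */
	rpl = __skb_put_zero(skb, rpl_len);
	/* ... fill the reply fields in *rpl ... */
	return skb;
}
```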
diff --git a/drivers/infiniband/hw/erdma/Kconfig b/drivers/infiniband/hw/erdma/Kconfig
new file mode 100644
index 000000000000..169038e3ceb1
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_ERDMA
+ tristate "Alibaba Elastic RDMA Adapter (ERDMA) support"
+ depends on PCI_MSI && 64BIT
+ depends on INFINIBAND_ADDR_TRANS
+ depends on INFINIBAND_USER_ACCESS
+ help
+ This is an RDMA/iWARP driver for the Alibaba Elastic RDMA Adapter
+ (ERDMA), which supports RDMA features in the Alibaba cloud environment.
+
+ To compile this driver as a module, choose M here. The module will be
+ called erdma.
diff --git a/drivers/infiniband/hw/erdma/Makefile b/drivers/infiniband/hw/erdma/Makefile
new file mode 100644
index 000000000000..51d2ef91905a
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_ERDMA) := erdma.o
+
+erdma-y := erdma_cm.o erdma_main.o erdma_cmdq.o erdma_cq.o erdma_verbs.o erdma_qp.o erdma_eq.o
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
new file mode 100644
index 000000000000..2aae635c1c8d
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_H__
+#define __ERDMA_H__
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma_hw.h"
+
+#define DRV_MODULE_NAME "erdma"
+#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
+
+struct erdma_eq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+
+ u16 ci;
+ u16 rsvd;
+
+ atomic64_t event_num;
+ atomic64_t notify_num;
+
+ u64 __iomem *db_addr;
+ u64 *db_record;
+};
+
+struct erdma_cmdq_sq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u16 ci;
+ u16 pi;
+
+ u16 wqebb_cnt;
+
+ u64 *db_record;
+};
+
+struct erdma_cmdq_cq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u32 ci;
+ u32 cmdsn;
+
+ u64 *db_record;
+
+ atomic64_t armed_num;
+};
+
+enum {
+ ERDMA_CMD_STATUS_INIT,
+ ERDMA_CMD_STATUS_ISSUED,
+ ERDMA_CMD_STATUS_FINISHED,
+ ERDMA_CMD_STATUS_TIMEOUT
+};
+
+struct erdma_comp_wait {
+ struct completion wait_event;
+ u32 cmd_status;
+ u32 ctx_id;
+ u16 sq_pi;
+ u8 comp_status;
+ u8 rsvd;
+ u32 comp_data[4];
+};
+
+enum {
+ ERDMA_CMDQ_STATE_OK_BIT = 0,
+ ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
+ ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
+};
+
+#define ERDMA_CMDQ_TIMEOUT_MS 15000
+#define ERDMA_REG_ACCESS_WAIT_MS 20
+#define ERDMA_WAIT_DEV_DONE_CNT 500
+
+struct erdma_cmdq {
+ unsigned long *comp_wait_bitmap;
+ struct erdma_comp_wait *wait_pool;
+ spinlock_t lock;
+
+ bool use_event;
+
+ struct erdma_cmdq_sq sq;
+ struct erdma_cmdq_cq cq;
+ struct erdma_eq eq;
+
+ unsigned long state;
+
+ struct semaphore credits;
+ u16 max_outstandings;
+};
+
+#define COMPROMISE_CC ERDMA_CC_CUBIC
+enum erdma_cc_alg {
+ ERDMA_CC_NEWRENO = 0,
+ ERDMA_CC_CUBIC,
+ ERDMA_CC_HPCC_RTT,
+ ERDMA_CC_HPCC_ECN,
+ ERDMA_CC_HPCC_INT,
+ ERDMA_CC_METHODS_NUM
+};
+
+struct erdma_devattr {
+ u32 fw_version;
+
+ unsigned char peer_addr[ETH_ALEN];
+
+ int numa_node;
+ enum erdma_cc_alg cc;
+ u32 grp_num;
+ u32 irq_num;
+
+ bool disable_dwqe;
+ u16 dwqe_pages;
+ u16 dwqe_entries;
+
+ u32 max_qp;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_ord;
+ u32 max_ird;
+
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_sge_rd;
+ u32 max_cq;
+ u32 max_cqe;
+ u64 max_mr_size;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_mw;
+ u32 local_dma_key;
+};
+
+#define ERDMA_IRQNAME_SIZE 50
+
+struct erdma_irq {
+ char name[ERDMA_IRQNAME_SIZE];
+ u32 msix_vector;
+ cpumask_t affinity_hint_mask;
+};
+
+struct erdma_eq_cb {
+ bool ready;
+ void *dev; /* All EQs use this field to get the erdma_dev struct */
+ struct erdma_irq irq;
+ struct erdma_eq eq;
+ struct tasklet_struct tasklet;
+};
+
+struct erdma_resource_cb {
+ unsigned long *bitmap;
+ spinlock_t lock;
+ u32 next_alloc_idx;
+ u32 max_cap;
+};
+
+enum {
+ ERDMA_RES_TYPE_PD = 0,
+ ERDMA_RES_TYPE_STAG_IDX = 1,
+ ERDMA_RES_CNT = 2,
+};
+
+#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
+#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
+
+struct erdma_dev {
+ struct ib_device ibdev;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct notifier_block netdev_nb;
+
+ resource_size_t func_bar_addr;
+ resource_size_t func_bar_len;
+ u8 __iomem *func_bar;
+
+ struct erdma_devattr attrs;
+ /* physical port state (only one port per device) */
+ enum ib_port_state state;
+
+ /* cmdq and aeq use the same msix vector */
+ struct erdma_irq comm_irq;
+ struct erdma_cmdq cmdq;
+ struct erdma_eq aeq;
+ struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];
+
+ spinlock_t lock;
+ struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
+ struct xarray qp_xa;
+ struct xarray cq_xa;
+
+ u32 next_alloc_qpn;
+ u32 next_alloc_cqn;
+
+ spinlock_t db_bitmap_lock;
+ /* We provide at most 64 uContexts, each with one SQ doorbell page. */
+ DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
+ /*
+ * We provide at most 496 uContexts, each with one normal SQ doorbell
+ * and one direct-WQE doorbell.
+ */
+ DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);
+
+ atomic_t num_ctx;
+ struct list_head cep_list;
+};
+
+static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
+{
+ idx &= (depth - 1);
+
+ return qbuf + (idx << shift);
+}
+
+static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct erdma_dev, ibdev);
+}
+
+static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
+{
+ return readl(dev->func_bar + reg);
+}
+
+static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
+{
+ return readq(dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
+{
+ writel(value, dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
+{
+ writeq(value, dev->func_bar + reg);
+}
+
+static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
+ u32 filed_mask)
+{
+ u32 val = erdma_reg_read32(dev, reg);
+
+ return FIELD_GET(filed_mask, val);
+}
+
+int erdma_cmdq_init(struct erdma_dev *dev);
+void erdma_finish_cmdq_init(struct erdma_dev *dev);
+void erdma_cmdq_destroy(struct erdma_dev *dev);
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+ u64 *resp0, u64 *resp1);
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
+
+int erdma_ceqs_init(struct erdma_dev *dev);
+void erdma_ceqs_uninit(struct erdma_dev *dev);
+void notify_eq(struct erdma_eq *eq);
+void *get_next_valid_eqe(struct erdma_eq *eq);
+
+int erdma_aeq_init(struct erdma_dev *dev);
+void erdma_aeq_destroy(struct erdma_dev *dev);
+
+void erdma_aeq_event_handler(struct erdma_dev *dev);
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
+
+#endif
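[Editor's note: get_queue_entry() in erdma.h above masks the ring index with (depth - 1), which is only equivalent to idx % depth when the queue depth is a power of two — the usual trick for avoiding a modulo on the fast path. A hedged sketch of the guard such a design implies, using the standard log2 helpers:]

```c
#include <linux/errno.h>
#include <linux/log2.h>

/* Validate a queue depth once at creation time; after this check,
 * idx & (depth - 1) safely wraps the index on every access. */
static int example_validate_depth(u32 depth)
{
	if (!is_power_of_2(depth))
		return -EINVAL;
	return 0;
}

/* Alternatively, round a requested depth up: roundup_pow_of_two(depth). */
```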
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
new file mode 100644
index 000000000000..f13f16479eca
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -0,0 +1,1430 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Fredy Neeser */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+static struct workqueue_struct *erdma_cm_wq;
+
+static void erdma_cm_llp_state_change(struct sock *sk);
+static void erdma_cm_llp_data_ready(struct sock *sk);
+static void erdma_cm_llp_error_report(struct sock *sk);
+
+static void erdma_sk_assign_cm_upcalls(struct sock *sk)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = erdma_cm_llp_state_change;
+ sk->sk_data_ready = erdma_cm_llp_data_ready;
+ sk->sk_error_report = erdma_cm_llp_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_save_upcalls(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ cep->sk_state_change = sk->sk_state_change;
+ cep->sk_data_ready = sk->sk_data_ready;
+ cep->sk_error_report = sk->sk_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_restore_upcalls(struct sock *sk, struct erdma_cep *cep)
+{
+ sk->sk_state_change = cep->sk_state_change;
+ sk->sk_data_ready = cep->sk_data_ready;
+ sk->sk_error_report = cep->sk_error_report;
+ sk->sk_user_data = NULL;
+}
+
+static void erdma_socket_disassoc(struct socket *s)
+{
+ struct sock *sk = s->sk;
+ struct erdma_cep *cep;
+
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ cep = sk_to_cep(sk);
+ if (cep) {
+ erdma_sk_restore_upcalls(sk, cep);
+ erdma_cep_put(cep);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void erdma_cep_socket_assoc(struct erdma_cep *cep, struct socket *s)
+{
+ cep->sock = s;
+ erdma_cep_get(cep);
+ s->sk->sk_user_data = cep;
+
+ erdma_sk_save_upcalls(s->sk);
+ erdma_sk_assign_cm_upcalls(s->sk);
+}
+
+static void erdma_disassoc_listen_cep(struct erdma_cep *cep)
+{
+ if (cep->listen_cep) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
+}
+
+static struct erdma_cep *erdma_cep_alloc(struct erdma_dev *dev)
+{
+ struct erdma_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
+ unsigned long flags;
+
+ if (!cep)
+ return NULL;
+
+ INIT_LIST_HEAD(&cep->listenq);
+ INIT_LIST_HEAD(&cep->devq);
+ INIT_LIST_HEAD(&cep->work_freelist);
+
+ kref_init(&cep->ref);
+ cep->state = ERDMA_EPSTATE_IDLE;
+ init_waitqueue_head(&cep->waitq);
+ spin_lock_init(&cep->lock);
+ cep->dev = dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cep->devq, &dev->cep_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return cep;
+}
+
+static void erdma_cm_free_work(struct erdma_cep *cep)
+{
+ struct list_head *w, *tmp;
+ struct erdma_cm_work *work;
+
+ list_for_each_safe(w, tmp, &cep->work_freelist) {
+ work = list_entry(w, struct erdma_cm_work, list);
+ list_del(&work->list);
+ kfree(work);
+ }
+}
+
+static void erdma_cancel_mpatimer(struct erdma_cep *cep)
+{
+ spin_lock_bh(&cep->lock);
+ if (cep->mpa_timer) {
+ if (cancel_delayed_work(&cep->mpa_timer->work)) {
+ erdma_cep_put(cep);
+ kfree(cep->mpa_timer);
+ }
+ cep->mpa_timer = NULL;
+ }
+ spin_unlock_bh(&cep->lock);
+}
+
+static void erdma_put_work(struct erdma_cm_work *work)
+{
+ INIT_LIST_HEAD(&work->list);
+ spin_lock_bh(&work->cep->lock);
+ list_add(&work->list, &work->cep->work_freelist);
+ spin_unlock_bh(&work->cep->lock);
+}
+
+static void erdma_cep_set_inuse(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ while (cep->in_use) {
+ spin_unlock_irqrestore(&cep->lock, flags);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ spin_lock_irqsave(&cep->lock, flags);
+ }
+
+ cep->in_use = 1;
+ spin_unlock_irqrestore(&cep->lock, flags);
+}
+
+static void erdma_cep_set_free(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ cep->in_use = 0;
+ spin_unlock_irqrestore(&cep->lock, flags);
+
+ wake_up(&cep->waitq);
+}
+
+static void __erdma_cep_dealloc(struct kref *ref)
+{
+ struct erdma_cep *cep = container_of(ref, struct erdma_cep, ref);
+ struct erdma_dev *dev = cep->dev;
+ unsigned long flags;
+
+ WARN_ON(cep->listen_cep);
+
+ kfree(cep->private_data);
+ kfree(cep->mpa.pdata);
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist))
+ erdma_cm_free_work(cep);
+ spin_unlock_bh(&cep->lock);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&cep->devq);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ kfree(cep);
+}
+
+static struct erdma_cm_work *erdma_get_work(struct erdma_cep *cep)
+{
+ struct erdma_cm_work *work = NULL;
+
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist)) {
+ work = list_entry(cep->work_freelist.next, struct erdma_cm_work,
+ list);
+ list_del_init(&work->list);
+ }
+
+ spin_unlock_bh(&cep->lock);
+ return work;
+}
+
+static int erdma_cm_alloc_work(struct erdma_cep *cep, int num)
+{
+ struct erdma_cm_work *work;
+
+ while (num--) {
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ if (!(list_empty(&cep->work_freelist)))
+ erdma_cm_free_work(cep);
+ return -ENOMEM;
+ }
+ work->cep = cep;
+ INIT_LIST_HEAD(&work->list);
+ list_add(&work->list, &cep->work_freelist);
+ }
+
+ return 0;
+}
+
+static int erdma_cm_upcall(struct erdma_cep *cep, enum iw_cm_event_type reason,
+ int status)
+{
+ struct iw_cm_event event;
+ struct iw_cm_id *cm_id;
+
+ memset(&event, 0, sizeof(event));
+ event.status = status;
+ event.event = reason;
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
+ event.provider_data = cep;
+ cm_id = cep->listen_cep->cm_id;
+
+ event.ird = cep->dev->attrs.max_ird;
+ event.ord = cep->dev->attrs.max_ord;
+ } else {
+ cm_id = cep->cm_id;
+ }
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
+ reason == IW_CM_EVENT_CONNECT_REPLY) {
+ u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);
+
+ if (pd_len && cep->mpa.pdata) {
+ event.private_data_len = pd_len;
+ event.private_data = cep->mpa.pdata;
+ }
+
+ getname_local(cep->sock, &event.local_addr);
+ getname_peer(cep->sock, &event.remote_addr);
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
+
+void erdma_qp_cm_drop(struct erdma_qp *qp)
+{
+ struct erdma_cep *cep = qp->cep;
+
+ if (!qp->cep)
+ return;
+
+ erdma_cep_set_inuse(cep);
+
+ /* already closed. */
+ if (cep->state == ERDMA_EPSTATE_CLOSED)
+ goto out;
+
+ if (cep->cm_id) {
+ switch (cep->state) {
+ case ERDMA_EPSTATE_AWAIT_MPAREP:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EINVAL);
+ break;
+ case ERDMA_EPSTATE_RDMA_MODE:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ break;
+ case ERDMA_EPSTATE_IDLE:
+ case ERDMA_EPSTATE_LISTENING:
+ case ERDMA_EPSTATE_CONNECTING:
+ case ERDMA_EPSTATE_AWAIT_MPAREQ:
+ case ERDMA_EPSTATE_RECVD_MPAREQ:
+ case ERDMA_EPSTATE_CLOSED:
+ default:
+ break;
+ }
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ erdma_cep_put(cep);
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->qp) {
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ }
+out:
+ erdma_cep_set_free(cep);
+}
+
+void erdma_cep_put(struct erdma_cep *cep)
+{
+ WARN_ON(kref_read(&cep->ref) < 1);
+ kref_put(&cep->ref, __erdma_cep_dealloc);
+}
+
+void erdma_cep_get(struct erdma_cep *cep)
+{
+ kref_get(&cep->ref);
+}
+
+static int erdma_send_mpareqrep(struct erdma_cep *cep, const void *pdata,
+ u8 pd_len)
+{
+ struct socket *s = cep->sock;
+ struct mpa_rr *rr = &cep->mpa.hdr;
+ struct kvec iov[3];
+ struct msghdr msg;
+ int iovec_num = 0;
+ int ret;
+ int mpa_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ rr->params.pd_len = cpu_to_be16(pd_len);
+
+ iov[iovec_num].iov_base = rr;
+ iov[iovec_num].iov_len = sizeof(*rr);
+ iovec_num++;
+ mpa_len = sizeof(*rr);
+
+ iov[iovec_num].iov_base = &cep->mpa.ext_data;
+ iov[iovec_num].iov_len = sizeof(cep->mpa.ext_data);
+ iovec_num++;
+ mpa_len += sizeof(cep->mpa.ext_data);
+
+ if (pd_len) {
+ iov[iovec_num].iov_base = (char *)pdata;
+ iov[iovec_num].iov_len = pd_len;
+ mpa_len += pd_len;
+ iovec_num++;
+ }
+
+ ret = kernel_sendmsg(s, &msg, iov, iovec_num, mpa_len);
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline int ksock_recv(struct socket *sock, char *buf, size_t size,
+ int flags)
+{
+ struct kvec iov = { buf, size };
+ struct msghdr msg = { .msg_name = NULL, .msg_flags = flags };
+
+ return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+}
+
+static int __recv_mpa_hdr(struct erdma_cep *cep, int hdr_rcvd, char *hdr,
+ int hdr_size, int *rcvd_out)
+{
+ struct socket *s = cep->sock;
+ int rcvd;
+
+ *rcvd_out = 0;
+ if (hdr_rcvd < hdr_size) {
+ rcvd = ksock_recv(s, hdr + hdr_rcvd, hdr_size - hdr_rcvd,
+ MSG_DONTWAIT);
+ if (rcvd == -EAGAIN)
+ return -EAGAIN;
+
+ if (rcvd <= 0)
+ return -ECONNABORTED;
+
+ hdr_rcvd += rcvd;
+ *rcvd_out = rcvd;
+
+ if (hdr_rcvd < hdr_size)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void __mpa_rr_set_revision(__be16 *bits, u8 rev)
+{
+ *bits = (*bits & ~MPA_RR_MASK_REVISION) |
+ (cpu_to_be16(rev) & MPA_RR_MASK_REVISION);
+}
+
+static u8 __mpa_rr_revision(__be16 mpa_rr_bits)
+{
+ __be16 rev = mpa_rr_bits & MPA_RR_MASK_REVISION;
+
+ return (u8)be16_to_cpu(rev);
+}
+
+static void __mpa_ext_set_cc(__be32 *bits, u32 cc)
+{
+ *bits = (*bits & ~MPA_EXT_FLAG_CC) |
+ (cpu_to_be32(cc) & MPA_EXT_FLAG_CC);
+}
+
+static u8 __mpa_ext_cc(__be32 mpa_ext_bits)
+{
+ __be32 cc = mpa_ext_bits & MPA_EXT_FLAG_CC;
+
+ return (u8)be32_to_cpu(cc);
+}
+
+/*
+ * Receive MPA Request/Reply header.
+ *
+ * Returns 0 if the complete MPA Request/Reply header, including any
+ * private data, was received. Returns -EAGAIN if the header was only
+ * partially received, or a negative error code otherwise.
+ *
+ * Context: May be called in process context only
+ */
+static int erdma_recv_mpa_rr(struct erdma_cep *cep)
+{
+ struct mpa_rr *hdr = &cep->mpa.hdr;
+ struct socket *s = cep->sock;
+ u16 pd_len;
+ int rcvd, to_rcv, ret, pd_rcvd;
+
+ if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
+ ret = __recv_mpa_hdr(cep, cep->mpa.bytes_rcvd,
+ (char *)&cep->mpa.hdr,
+ sizeof(struct mpa_rr), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA ||
+ __mpa_rr_revision(hdr->params.bits) != MPA_REVISION_EXT_1)
+ return -EPROTO;
+
+ if (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) <
+ sizeof(struct erdma_mpa_ext)) {
+ ret = __recv_mpa_hdr(
+ cep, cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
+ (char *)&cep->mpa.ext_data,
+ sizeof(struct erdma_mpa_ext), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ pd_len = be16_to_cpu(hdr->params.pd_len);
+ pd_rcvd = cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) -
+ sizeof(struct erdma_mpa_ext);
+ to_rcv = pd_len - pd_rcvd;
+
+ if (!to_rcv) {
+ /*
+ * We have received the whole MPA Request/Reply message.
+ * Check for a peer protocol violation.
+ */
+ u32 word;
+
+ ret = __recv_mpa_hdr(cep, 0, (char *)&word, sizeof(word),
+ &rcvd);
+ if (ret == -EAGAIN && rcvd == 0)
+ return 0;
+
+ if (ret)
+ return ret;
+
+ return -EPROTO;
+ }
+
+ /*
+ * At this point, MPA header has been fully received, and pd_len != 0.
+ * So, begin to receive private data.
+ */
+ if (!cep->mpa.pdata) {
+ cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
+ if (!cep->mpa.pdata)
+ return -ENOMEM;
+ }
+
+ rcvd = ksock_recv(s, cep->mpa.pdata + pd_rcvd, to_rcv + 4,
+ MSG_DONTWAIT);
+ if (rcvd < 0)
+ return rcvd;
+
+ if (rcvd > to_rcv)
+ return -EPROTO;
+
+ cep->mpa.bytes_rcvd += rcvd;
+
+ if (to_rcv == rcvd)
+ return 0;
+
+ return -EAGAIN;
+}
+
+/*
+ * erdma_proc_mpareq()
+ *
+ * Read MPA Request from socket and signal new connection to IWCM
+ * if success. Caller must hold lock on corresponding listening CEP.
+ */
+static int erdma_proc_mpareq(struct erdma_cep *cep)
+{
+ struct mpa_rr *req;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ return ret;
+
+ req = &cep->mpa.hdr;
+
+ if (memcmp(req->key, MPA_KEY_REQ, MPA_KEY_SIZE))
+ return -EPROTO;
+
+ memcpy(req->key, MPA_KEY_REP, MPA_KEY_SIZE);
+
+ /* Markers and CRC are currently not supported. */
+ if (req->params.bits & MPA_RR_FLAG_MARKERS ||
+ req->params.bits & MPA_RR_FLAG_CRC)
+ goto reject_conn;
+
+ cep->state = ERDMA_EPSTATE_RECVD_MPAREQ;
+
+ /* Keep reference until IWCM accepts/rejects */
+ erdma_cep_get(cep);
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
+ if (ret)
+ erdma_cep_put(cep);
+
+ return ret;
+
+reject_conn:
+ req->params.bits &= ~MPA_RR_FLAG_MARKERS;
+ req->params.bits |= MPA_RR_FLAG_REJECT;
+ req->params.bits &= ~MPA_RR_FLAG_CRC;
+
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ erdma_send_mpareqrep(cep, NULL, 0);
+
+ return -EOPNOTSUPP;
+}
+
+static int erdma_proc_mpareply(struct erdma_cep *cep)
+{
+ struct erdma_qp_attrs qp_attrs;
+ struct erdma_qp *qp = cep->qp;
+ struct mpa_rr *rep;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ goto out_err;
+
+ erdma_cancel_mpatimer(cep);
+
+ rep = &cep->mpa.hdr;
+
+ if (memcmp(rep->key, MPA_KEY_REP, MPA_KEY_SIZE)) {
+ ret = -EPROTO;
+ goto out_err;
+ }
+
+ if (rep->params.bits & MPA_RR_FLAG_REJECT) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);
+ return -ECONNRESET;
+ }
+
+ /* Markers and CRC are currently not supported. */
+ if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
+ (rep->params.bits & MPA_RR_FLAG_CRC)) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ return -EINVAL;
+ }
+
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+ qp_attrs.irq_size = cep->ird;
+ qp_attrs.orq_size = cep->ord;
+ qp_attrs.state = ERDMA_QP_STATE_RTS;
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto out_err;
+ }
+
+ qp->attrs.qp_type = ERDMA_QP_ACTIVE;
+ if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc)
+ qp->attrs.cc = COMPROMISE_CC;
+
+ ret = erdma_modify_qp_internal(qp, &qp_attrs,
+ ERDMA_QP_ATTR_STATE |
+ ERDMA_QP_ATTR_LLP_HANDLE |
+ ERDMA_QP_ATTR_MPA);
+
+ up_write(&qp->state_lock);
+
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
+ if (!ret)
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ return 0;
+ }
+
+out_err:
+ if (ret != -EAGAIN)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+
+ return ret;
+}
+
+static void erdma_accept_newconn(struct erdma_cep *cep)
+{
+ struct socket *s = cep->sock;
+ struct socket *new_s = NULL;
+ struct erdma_cep *new_cep = NULL;
+ int ret = 0;
+
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ goto error;
+
+ new_cep = erdma_cep_alloc(cep->dev);
+ if (!new_cep)
+ goto error;
+
+ /*
+ * 4: Allocate a sufficient number of work elements
+ * to allow concurrent handling of local + peer close
+ * events, MPA header processing + MPA timeout.
+ */
+ if (erdma_cm_alloc_work(new_cep, 4) != 0)
+ goto error;
+
+ /*
+ * Copy saved socket callbacks from listening CEP
+ * and assign new socket with new CEP
+ */
+ new_cep->sk_state_change = cep->sk_state_change;
+ new_cep->sk_data_ready = cep->sk_data_ready;
+ new_cep->sk_error_report = cep->sk_error_report;
+
+ ret = kernel_accept(s, &new_s, O_NONBLOCK);
+ if (ret != 0)
+ goto error;
+
+ new_cep->sock = new_s;
+ erdma_cep_get(new_cep);
+ new_s->sk->sk_user_data = new_cep;
+
+ tcp_sock_set_nodelay(new_s->sk);
+ new_cep->state = ERDMA_EPSTATE_AWAIT_MPAREQ;
+
+ ret = erdma_cm_queue_work(new_cep, ERDMA_CM_WORK_MPATIMEOUT);
+ if (ret)
+ goto error;
+
+ new_cep->listen_cep = cep;
+ erdma_cep_get(cep);
+
+ if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
+ /* MPA REQ already queued */
+ erdma_cep_set_inuse(new_cep);
+ ret = erdma_proc_mpareq(new_cep);
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep);
+ new_cep->listen_cep = NULL;
+ if (ret) {
+ erdma_cep_set_free(new_cep);
+ goto error;
+ }
+ }
+ erdma_cep_set_free(new_cep);
+ }
+ return;
+
+error:
+ if (new_cep) {
+ new_cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cancel_mpatimer(new_cep);
+
+ erdma_cep_put(new_cep);
+ new_cep->sock = NULL;
+ }
+
+ if (new_s) {
+ erdma_socket_disassoc(new_s);
+ sock_release(new_s);
+ }
+}
+
+static int erdma_newconn_connected(struct erdma_cep *cep)
+{
+ int ret = 0;
+
+ cep->mpa.hdr.params.bits = 0;
+ __mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
+
+ memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
+
+ ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
+ cep->state = ERDMA_EPSTATE_AWAIT_MPAREP;
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (ret >= 0)
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_MPATIMEOUT);
+
+ return ret;
+}
+
+static void erdma_cm_work_handler(struct work_struct *w)
+{
+ struct erdma_cm_work *work;
+ struct erdma_cep *cep;
+ int release_cep = 0, ret = 0;
+
+ work = container_of(w, struct erdma_cm_work, work.work);
+ cep = work->cep;
+
+ erdma_cep_set_inuse(cep);
+
+ switch (work->type) {
+ case ERDMA_CM_WORK_CONNECTED:
+ erdma_cancel_mpatimer(cep);
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ ret = erdma_newconn_connected(cep);
+ if (ret) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EIO);
+ release_cep = 1;
+ }
+ }
+ break;
+ case ERDMA_CM_WORK_CONNECTTIMEOUT:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ cep->mpa_timer = NULL;
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ }
+ break;
+ case ERDMA_CM_WORK_ACCEPT:
+ erdma_accept_newconn(cep);
+ break;
+ case ERDMA_CM_WORK_READ_MPAHDR:
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ if (cep->listen_cep) {
+ erdma_cep_set_inuse(cep->listen_cep);
+
+ if (cep->listen_cep->state ==
+ ERDMA_EPSTATE_LISTENING)
+ ret = erdma_proc_mpareq(cep);
+ else
+ ret = -EFAULT;
+
+ erdma_cep_set_free(cep->listen_cep);
+
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ if (ret)
+ erdma_cep_put(cep);
+ }
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ ret = erdma_proc_mpareply(cep);
+ }
+
+ if (ret && ret != -EAGAIN)
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_CLOSE_LLP:
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_PEER_CLOSE:
+ if (cep->cm_id) {
+ if (cep->state == ERDMA_EPSTATE_CONNECTING ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA reply not received, but the connection dropped.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ } else if (cep->state == ERDMA_EPSTATE_RDMA_MODE) {
+ /*
+ * NOTE: IW_CM_EVENT_DISCONNECT is given just
+ * to transition IWCM into CLOSING.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* Socket close before MPA request received. */
+ erdma_disassoc_listen_cep(cep);
+ erdma_cep_put(cep);
+ }
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_MPATIMEOUT:
+ cep->mpa_timer = NULL;
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA request timed out:
+ * Hide any partially received private data and signal
+ * timeout
+ */
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* No MPA req received after peer TCP stream setup. */
+ erdma_disassoc_listen_cep(cep);
+
+ erdma_cep_put(cep);
+ release_cep = 1;
+ }
+ break;
+ default:
+ WARN(1, "Undefined CM work type: %d\n", work->type);
+ }
+
+ if (release_cep) {
+ erdma_cancel_mpatimer(cep);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ if (cep->qp) {
+ struct erdma_qp *qp = cep->qp;
+ /*
+ * Serialize a potential race with application
+ * closing the QP and calling erdma_qp_cm_drop()
+ */
+ erdma_qp_get(qp);
+ erdma_cep_set_free(cep);
+
+ erdma_qp_llp_close(qp);
+ erdma_qp_put(qp);
+
+ erdma_cep_set_inuse(cep);
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ }
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cep_put(cep);
+ }
+ }
+ erdma_cep_set_free(cep);
+ erdma_put_work(work);
+ erdma_cep_put(cep);
+}
+
+int erdma_cm_queue_work(struct erdma_cep *cep, enum erdma_work_type type)
+{
+ struct erdma_cm_work *work = erdma_get_work(cep);
+ unsigned long delay = 0;
+
+ if (!work)
+ return -ENOMEM;
+
+ work->type = type;
+ work->cep = cep;
+
+ erdma_cep_get(cep);
+
+ INIT_DELAYED_WORK(&work->work, erdma_cm_work_handler);
+
+ if (type == ERDMA_CM_WORK_MPATIMEOUT) {
+ cep->mpa_timer = work;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ delay = MPAREP_TIMEOUT;
+ else
+ delay = MPAREQ_TIMEOUT;
+ } else if (type == ERDMA_CM_WORK_CONNECTTIMEOUT) {
+ cep->mpa_timer = work;
+
+ delay = CONNECT_TIMEOUT;
+ }
+
+ queue_delayed_work(erdma_cm_wq, &work->work, delay);
+
+ return 0;
+}
+
+static void erdma_cm_llp_data_ready(struct sock *sk)
+{
+ struct erdma_cep *cep;
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep)
+ goto out;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_READ_MPAHDR);
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void erdma_cm_llp_error_report(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ if (cep)
+ cep->sk_error_report(sk);
+}
+
+static void erdma_cm_llp_state_change(struct sock *sk)
+{
+ struct erdma_cep *cep;
+ void (*orig_state_change)(struct sock *sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = cep->sk_state_change;
+
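+	/*
+	 * TCP_ESTABLISHED completes an active connect for a CEP in
+	 * CONNECTING state; for any other CEP it indicates a new
+	 * inbound connection on a listening socket, so queue ACCEPT
+	 * work instead.
+	 */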
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ else
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_ACCEPT);
+ break;
+ case TCP_CLOSE:
+ case TCP_CLOSE_WAIT:
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_PEER_CLOSE);
+ break;
+ default:
+ break;
+ }
+ read_unlock(&sk->sk_callback_lock);
+ orig_state_change(sk);
+}
+
+static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
+ int laddrlen, struct sockaddr *raddr,
+ int raddrlen, int flags)
+{
+ int ret;
+
+ sock_set_reuseaddr(s->sk);
+ ret = s->ops->bind(s, laddr, laddrlen);
+ if (ret)
+ return ret;
+ ret = s->ops->connect(s, raddr, raddrlen, flags);
+ return ret < 0 ? ret : 0;
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_qp *qp;
+ struct erdma_cep *cep = NULL;
+ struct socket *s = NULL;
+ struct sockaddr *laddr = (struct sockaddr *)&id->m_local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&id->m_remote_addr;
+ u16 pd_len = params->private_data_len;
+ int ret;
+
+ if (pd_len > MPA_MAX_PRIVDATA)
+ return -EINVAL;
+
+ if (params->ird > dev->attrs.max_ird ||
+ params->ord > dev->attrs.max_ord)
+ return -EINVAL;
+
+ if (laddr->sa_family != AF_INET || raddr->sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ ret = sock_create(AF_INET, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ goto error_put_qp;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error_release_sock;
+ }
+
+ erdma_cep_set_inuse(cep);
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ /* Associate cm_id with CEP */
+ id->add_ref(id);
+ cep->cm_id = id;
+
+	/*
+	 * Allocate 6 work elements: enough to handle concurrent
+	 * local + peer close events, MPA header processing + MPA
+	 * timeout, and the connected event + connect timeout.
+	 */
+ ret = erdma_cm_alloc_work(cep, 6);
+ if (ret != 0) {
+ ret = -ENOMEM;
+ goto error_release_cep;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+ cep->state = ERDMA_EPSTATE_CONNECTING;
+
+ erdma_cep_socket_assoc(cep, s);
+
+ if (pd_len) {
+ cep->pd_len = pd_len;
+ cep->private_data = kmalloc(pd_len, GFP_KERNEL);
+ if (!cep->private_data) {
+ ret = -ENOMEM;
+ goto error_disassoc;
+ }
+
+ memcpy(cep->private_data, params->private_data,
+ params->private_data_len);
+ }
+
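+	/*
+	 * Non-blocking connect: 0 means the TCP handshake already
+	 * completed, so queue the CONNECTED work directly;
+	 * -EINPROGRESS means completion will arrive later via the
+	 * sk_state_change upcall, so arm the connect timeout.
+	 */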
+ ret = kernel_bindconnect(s, laddr, sizeof(*laddr), raddr,
+ sizeof(*raddr), O_NONBLOCK);
+ if (ret != -EINPROGRESS && ret != 0) {
+ goto error_disassoc;
+ } else if (ret == 0) {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ if (ret)
+ goto error_disassoc;
+ } else {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTTIMEOUT);
+ if (ret)
+ goto error_disassoc;
+ }
+
+ erdma_cep_set_free(cep);
+ return 0;
+
+error_disassoc:
+ kfree(cep->private_data);
+ cep->private_data = NULL;
+ cep->pd_len = 0;
+
+ erdma_socket_disassoc(s);
+
+error_release_cep:
+ /* disassoc with cm_id */
+ cep->cm_id = NULL;
+ id->rem_ref(id);
+
+ /* disassoc with qp */
+ qp->cep = NULL;
+ erdma_cep_put(cep);
+ cep->qp = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+
+ /* release the cep. */
+ erdma_cep_put(cep);
+
+error_release_sock:
+ if (s)
+ sock_release(s);
+error_put_qp:
+ erdma_qp_put(qp);
+
+ return ret;
+}
+
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+ struct erdma_qp *qp;
+ struct erdma_qp_attrs qp_attrs;
+ int ret;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ /* Free lingering inbound private data */
+ if (cep->mpa.hdr.params.pd_len) {
+ cep->mpa.hdr.params.pd_len = 0;
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ }
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->ord > dev->attrs.max_ord ||
+	    params->ird > dev->attrs.max_ird) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->private_data_len > MPA_MAX_PRIVDATA) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+ qp_attrs.orq_size = params->ord;
+ qp_attrs.irq_size = params->ird;
+
+ qp_attrs.state = ERDMA_QP_STATE_RTS;
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ qp->attrs.qp_type = ERDMA_QP_PASSIVE;
+ qp->attrs.pd_len = params->private_data_len;
+
+ if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits))
+ qp->attrs.cc = COMPROMISE_CC;
+
+ /* move to rts */
+ ret = erdma_modify_qp_internal(qp, &qp_attrs,
+ ERDMA_QP_ATTR_STATE |
+ ERDMA_QP_ATTR_ORD |
+ ERDMA_QP_ATTR_LLP_HANDLE |
+ ERDMA_QP_ATTR_IRD |
+ ERDMA_QP_ATTR_MPA);
+ up_write(&qp->state_lock);
+
+ if (ret)
+ goto error;
+
+ cep->mpa.ext_data.bits = 0;
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+
+ ret = erdma_send_mpareqrep(cep, params->private_data,
+ params->private_data_len);
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
+ if (ret)
+ goto error;
+
+ erdma_cep_set_free(cep);
+
+ return 0;
+ }
+
+error:
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(id);
+ cep->cm_id = NULL;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(cep);
+ qp->cep = NULL;
+ }
+
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return ret;
+}
+
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen)
+{
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ if (__mpa_rr_revision(cep->mpa.hdr.params.bits) == MPA_REVISION_EXT_1) {
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
+ erdma_send_mpareqrep(cep, pdata, plen);
+ }
+
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return 0;
+}
+
+int erdma_create_listen(struct iw_cm_id *id, int backlog)
+{
+ struct socket *s;
+ struct erdma_cep *cep = NULL;
+ int ret = 0;
+ struct erdma_dev *dev = to_edev(id->device);
+ int addr_family = id->local_addr.ss_family;
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ if (addr_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ ret = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ return ret;
+
+ sock_set_reuseaddr(s->sk);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+ s->sk->sk_bound_dev_if = dev->netdev->ifindex;
+
+ ret = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ if (ret)
+ goto error;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ erdma_cep_socket_assoc(cep, s);
+
+ ret = erdma_cm_alloc_work(cep, backlog);
+ if (ret)
+ goto error;
+
+ ret = s->ops->listen(s, backlog);
+ if (ret)
+ goto error;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ if (!id->provider_data) {
+ id->provider_data =
+ kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!id->provider_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ INIT_LIST_HEAD((struct list_head *)id->provider_data);
+ }
+
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = ERDMA_EPSTATE_LISTENING;
+
+ return 0;
+
+error:
+ if (cep) {
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ cep->sock = NULL;
+ erdma_socket_disassoc(s);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+ sock_release(s);
+
+ return ret;
+}
+
+static void erdma_drop_listeners(struct iw_cm_id *id)
+{
+ struct list_head *p, *tmp;
+ /*
+ * In case of a wildcard rdma_listen on a multi-homed device,
+ * a listener's IWCM id is associated with more than one listening CEP.
+ */
+ list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
+ struct erdma_cep *cep =
+ list_entry(p, struct erdma_cep, listenq);
+
+ list_del(p);
+
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+}
+
+int erdma_destroy_listen(struct iw_cm_id *id)
+{
+ if (!id->provider_data)
+ return 0;
+
+ erdma_drop_listeners(id);
+ kfree(id->provider_data);
+ id->provider_data = NULL;
+
+ return 0;
+}
+
+int erdma_cm_init(void)
+{
+ erdma_cm_wq = create_singlethread_workqueue("erdma_cm_wq");
+ if (!erdma_cm_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void erdma_cm_exit(void)
+{
+ if (erdma_cm_wq)
+ destroy_workqueue(erdma_cm_wq);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.h b/drivers/infiniband/hw/erdma/erdma_cm.h
new file mode 100644
index 000000000000..8a3f998fec9b
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#ifndef __ERDMA_CM_H__
+#define __ERDMA_CM_H__
+
+#include <linux/tcp.h>
+#include <net/sock.h>
+#include <rdma/iw_cm.h>
+
+/* iWarp MPA protocol defs */
+#define MPA_REVISION_EXT_1 129
+#define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+#define MPA_KEY_SIZE 16
+#define MPA_DEFAULT_HDR_LEN 28
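+/* 28 bytes: the 16-byte key, 4-byte params and the 8-byte erdma extension. */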
+
+struct mpa_rr_params {
+ __be16 bits;
+ __be16 pd_len;
+};
+
+/*
+ * MPA request/response Hdr bits & fields
+ */
+enum {
+ MPA_RR_FLAG_MARKERS = __cpu_to_be16(0x8000),
+ MPA_RR_FLAG_CRC = __cpu_to_be16(0x4000),
+ MPA_RR_FLAG_REJECT = __cpu_to_be16(0x2000),
+ MPA_RR_RESERVED = __cpu_to_be16(0x1f00),
+ MPA_RR_MASK_REVISION = __cpu_to_be16(0x00ff)
+};
+
+/*
+ * MPA request/reply header
+ */
+struct mpa_rr {
+ u8 key[16];
+ struct mpa_rr_params params;
+};
+
+struct erdma_mpa_ext {
+ __be32 cookie;
+ __be32 bits;
+};
+
+enum {
+ MPA_EXT_FLAG_CC = cpu_to_be32(0x0000000f),
+};
+
+struct erdma_mpa_info {
+ struct mpa_rr hdr; /* peer mpa hdr in host byte order */
+ struct erdma_mpa_ext ext_data;
+ char *pdata;
+ int bytes_rcvd;
+};
+
+struct erdma_sk_upcalls {
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+struct erdma_dev;
+
+enum erdma_cep_state {
+ ERDMA_EPSTATE_IDLE = 1,
+ ERDMA_EPSTATE_LISTENING,
+ ERDMA_EPSTATE_CONNECTING,
+ ERDMA_EPSTATE_AWAIT_MPAREQ,
+ ERDMA_EPSTATE_RECVD_MPAREQ,
+ ERDMA_EPSTATE_AWAIT_MPAREP,
+ ERDMA_EPSTATE_RDMA_MODE,
+ ERDMA_EPSTATE_CLOSED
+};
+
+struct erdma_cep {
+ struct iw_cm_id *cm_id;
+ struct erdma_dev *dev;
+ struct list_head devq;
+ spinlock_t lock;
+ struct kref ref;
+ int in_use;
+ wait_queue_head_t waitq;
+ enum erdma_cep_state state;
+
+ struct list_head listenq;
+ struct erdma_cep *listen_cep;
+
+ struct erdma_qp *qp;
+ struct socket *sock;
+
+ struct erdma_cm_work *mpa_timer;
+ struct list_head work_freelist;
+
+ struct erdma_mpa_info mpa;
+ int ord;
+ int ird;
+
+ int pd_len;
+	/* holds the user's private data. */
+ void *private_data;
+
+ /* Saved upcalls of socket llp.sock */
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+#define MPAREQ_TIMEOUT (HZ * 20)
+#define MPAREP_TIMEOUT (HZ * 10)
+#define CONNECT_TIMEOUT (HZ * 10)
+
+enum erdma_work_type {
+ ERDMA_CM_WORK_ACCEPT = 1,
+ ERDMA_CM_WORK_READ_MPAHDR,
+ ERDMA_CM_WORK_CLOSE_LLP, /* close socket */
+ ERDMA_CM_WORK_PEER_CLOSE, /* socket indicated peer close */
+ ERDMA_CM_WORK_MPATIMEOUT,
+ ERDMA_CM_WORK_CONNECTED,
+ ERDMA_CM_WORK_CONNECTTIMEOUT
+};
+
+struct erdma_cm_work {
+ struct delayed_work work;
+ struct list_head list;
+ enum erdma_work_type type;
+ struct erdma_cep *cep;
+};
+
+#define to_sockaddr_in(a) (*(struct sockaddr_in *)(&(a)))
+
+static inline int getname_peer(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 1);
+}
+
+static inline int getname_local(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 0);
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen);
+int erdma_create_listen(struct iw_cm_id *id, int backlog);
+int erdma_destroy_listen(struct iw_cm_id *id);
+
+void erdma_cep_get(struct erdma_cep *ceq);
+void erdma_cep_put(struct erdma_cep *ceq);
+int erdma_cm_queue_work(struct erdma_cep *ceq, enum erdma_work_type type);
+
+int erdma_cm_init(void);
+void erdma_cm_exit(void);
+
+#define sk_to_cep(sk) ((struct erdma_cep *)((sk)->sk_user_data))
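+/*
+ * The CEP pointer is stashed in the socket's sk_user_data (set up when
+ * the CEP is bound to its socket), letting the llp callbacks recover
+ * the CEP from a bare struct sock.
+ */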
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
new file mode 100644
index 000000000000..57da0c670472
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "erdma.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
+
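+	/*
+	 * Update the doorbell shadow in host memory first (its DMA
+	 * address was programmed into ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG
+	 * at init), then ring the CQ doorbell register.
+	 */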
+ *cmdq->cq.db_record = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
+
+ atomic64_inc(&cmdq->cq.armed_num);
+}
+
+static void kick_cmdq_db(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
+
+ *cmdq->sq.db_record = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
+}
+
+static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
+{
+ int comp_idx;
+
+ spin_lock(&cmdq->lock);
+ comp_idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
+ cmdq->max_outstandings);
+ if (comp_idx == cmdq->max_outstandings) {
+ spin_unlock(&cmdq->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ __set_bit(comp_idx, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ return &cmdq->wait_pool[comp_idx];
+}
+
+static void put_comp_wait(struct erdma_cmdq *cmdq,
+ struct erdma_comp_wait *comp_wait)
+{
+ int used;
+
+ cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
+ spin_lock(&cmdq->lock);
+ used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ WARN_ON(!used);
+}
+
+static int erdma_cmdq_wait_res_init(struct erdma_dev *dev,
+ struct erdma_cmdq *cmdq)
+{
+ int i;
+
+ cmdq->wait_pool =
+ devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
+ sizeof(struct erdma_comp_wait), GFP_KERNEL);
+ if (!cmdq->wait_pool)
+ return -ENOMEM;
+
+ spin_lock_init(&cmdq->lock);
+ cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
+ &dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
+ if (!cmdq->comp_wait_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->max_outstandings; i++) {
+ init_completion(&cmdq->wait_pool[i].wait_event);
+ cmdq->wait_pool[i].ctx_id = i;
+ }
+
+ return 0;
+}
+
+static int erdma_cmdq_sq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_sq *sq = &cmdq->sq;
+ u32 buf_size;
+
+ sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
+ sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
+
+ buf_size = sq->depth << SQEBB_SHIFT;
+
+ sq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &sq->qbuf_dma_addr, GFP_KERNEL);
+ if (!sq->qbuf)
+ return -ENOMEM;
+
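+	/*
+	 * The doorbell shadow record lives immediately after the queue
+	 * entries, in the same DMA allocation (hence the extra space
+	 * reserved by WARPPED_BUFSIZE()).
+	 */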
+ sq->db_record = (u64 *)(sq->qbuf + buf_size);
+
+ spin_lock_init(&sq->lock);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_H_REG,
+ upper_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
+ lower_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
+ sq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+static int erdma_cmdq_cq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_cq *cq = &cmdq->cq;
+ u32 buf_size;
+
+ cq->depth = cmdq->sq.depth;
+ buf_size = cq->depth << CQE_SHIFT;
+
+ cq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &cq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!cq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&cq->lock);
+
+ cq->db_record = (u64 *)(cq->qbuf + buf_size);
+
+ atomic64_set(&cq->armed_num, 0);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_H_REG,
+ upper_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
+ lower_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
+ cq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+static int erdma_cmdq_eq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_eq *eq = &cmdq->eq;
+ u32 buf_size;
+
+ eq->depth = cmdq->max_outstandings;
+ buf_size = eq->depth << EQE_SHIFT;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+
+ eq->db_addr =
+ (u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
+ eq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+int erdma_cmdq_init(struct erdma_dev *dev)
+{
+ int err, i;
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ u32 sts, ctrl;
+
+ cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
+ cmdq->use_event = false;
+
+ sema_init(&cmdq->credits, cmdq->max_outstandings);
+
+ err = erdma_cmdq_wait_res_init(dev, cmdq);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_sq_init(dev);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_cq_init(dev);
+ if (err)
+ goto err_destroy_sq;
+
+ err = erdma_cmdq_eq_init(dev);
+ if (err)
+ goto err_destroy_cq;
+
+ ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1);
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
+
+ for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
+ sts = erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
+ ERDMA_REG_DEV_ST_INIT_DONE_MASK);
+ if (sts)
+ break;
+
+ msleep(ERDMA_REG_ACCESS_WAIT_MS);
+ }
+
+ if (i == ERDMA_WAIT_DEV_DONE_CNT) {
+ dev_err(&dev->pdev->dev, "wait init done failed.\n");
+ err = -ETIMEDOUT;
+ goto err_destroy_eq;
+ }
+
+ set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ return 0;
+
+err_destroy_eq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->eq.depth << EQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
+
+err_destroy_cq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->cq.depth << CQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+
+err_destroy_sq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->sq.depth << SQEBB_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+
+ return err;
+}
+
+void erdma_finish_cmdq_init(struct erdma_dev *dev)
+{
+	/* After device init succeeds, switch the cmdq to event mode. */
+ dev->cmdq.use_event = true;
+ arm_cmdq_cq(&dev->cmdq);
+}
+
+void erdma_cmdq_destroy(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->eq.depth << EQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->sq.depth << SQEBB_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->cq.depth << CQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+}
+
+static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
+{
+ __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
+ cmdq->cq.depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ __be32_to_cpu(READ_ONCE(*cqe)));
+
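+	/*
+	 * A CQE is valid when its owner bit differs from the wrap
+	 * parity of the consumer index (ci & depth); the expected
+	 * owner value flips on every pass through the ring.
+	 */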
+ return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
+}
+
+static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
+ struct erdma_comp_wait *comp_wait)
+{
+ __le64 *wqe;
+ u64 hdr = *req;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_ISSUED;
+ reinit_completion(&comp_wait->wait_event);
+ comp_wait->sq_pi = cmdq->sq.pi;
+
+ wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ memcpy(wqe, req, req_len);
+
+ cmdq->sq.pi += cmdq->sq.wqebb_cnt;
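+	/*
+	 * Rewrite the first qword of the WQE: fold the advanced
+	 * producer index, the completion-wait cookie and the WQEBB
+	 * count into the request header before publishing it.
+	 */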
+ hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
+ FIELD_PREP(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK,
+ comp_wait->ctx_id) |
+ FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
+ *wqe = cpu_to_le64(hdr);
+
+ kick_cmdq_db(cmdq);
+}
+
+static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
+{
+ struct erdma_comp_wait *comp_wait;
+ u32 hdr0, sqe_idx;
+ __be32 *cqe;
+ u16 ctx_id;
+ u64 *sqe;
+ int i;
+
+ cqe = get_next_valid_cmdq_cqe(cmdq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cmdq->cq.ci++;
+
+ dma_rmb();
+ hdr0 = __be32_to_cpu(*cqe);
+ sqe_idx = __be32_to_cpu(*(cqe + 1));
+
+ sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
+ comp_wait = &cmdq->wait_pool[ctx_id];
+ if (comp_wait->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ return -EIO;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
+ comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
+ cmdq->sq.ci += cmdq->sq.wqebb_cnt;
+
+ for (i = 0; i < 4; i++)
+ comp_wait->comp_data[i] = __be32_to_cpu(*(cqe + 2 + i));
+
+ if (cmdq->use_event)
+ complete(&comp_wait->wait_event);
+
+ return 0;
+}
+
+static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
+{
+ unsigned long flags;
+ u16 comp_num;
+
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+
+	/* At most max_outstandings completions can be
+	 * pending at any one time.
+	 */
+ for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
+ if (erdma_poll_single_cmd_completion(cmdq))
+ break;
+
+ if (comp_num && cmdq->use_event)
+ arm_cmdq_cq(cmdq);
+
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+}
+
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
+{
+ int got_event = 0;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
+ !cmdq->use_event)
+ return;
+
+ while (get_next_valid_eqe(&cmdq->eq)) {
+ cmdq->eq.ci++;
+ got_event++;
+ }
+
+ if (got_event) {
+ cmdq->cq.cmdsn++;
+ erdma_polling_cmd_completions(cmdq);
+ }
+
+ notify_eq(&cmdq->eq);
+}
+
+static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long comp_timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (1) {
+ erdma_polling_cmd_completions(cmdq);
+ if (comp_ctx->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ break;
+
+ if (time_is_before_jiffies(comp_timeout))
+ return -ETIME;
+
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static int erdma_wait_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long flags = 0;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ msecs_to_jiffies(timeout));
+
+ if (unlikely(comp_ctx->cmd_status != ERDMA_CMD_STATUS_FINISHED)) {
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+ comp_ctx->cmd_status = ERDMA_CMD_STATUS_TIMEOUT;
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
+{
+ *hdr = FIELD_PREP(ERDMA_CMD_HDR_SUB_MOD_MASK, mod) |
+ FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
+}
+
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+ u64 *resp0, u64 *resp1)
+{
+ struct erdma_comp_wait *comp_wait;
+ int ret;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
+ return -ENODEV;
+
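+	/*
+	 * Each in-flight command holds one credit; down() blocks new
+	 * submitters once max_outstandings commands are pending.
+	 */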
+ down(&cmdq->credits);
+
+ comp_wait = get_comp_wait(cmdq);
+ if (IS_ERR(comp_wait)) {
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
+ up(&cmdq->credits);
+ return PTR_ERR(comp_wait);
+ }
+
+ spin_lock(&cmdq->sq.lock);
+ push_cmdq_sqe(cmdq, req, req_size, comp_wait);
+ spin_unlock(&cmdq->sq.lock);
+
+ if (cmdq->use_event)
+ ret = erdma_wait_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+ else
+ ret = erdma_poll_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+
+ if (ret) {
+ set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ goto out;
+ }
+
+ if (comp_wait->comp_status)
+ ret = -EIO;
+
+ if (resp0 && resp1) {
+ *resp0 = *((u64 *)&comp_wait->comp_data[0]);
+ *resp1 = *((u64 *)&comp_wait->comp_data[2]);
+ }
+ put_comp_wait(cmdq, comp_wait);
+
+out:
+ up(&cmdq->credits);
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
new file mode 100644
index 000000000000..751c7f9f0de7
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <rdma/ib_verbs.h>
+
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static void *get_next_valid_cqe(struct erdma_cq *cq)
+{
+ __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
+ cq->depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ __be32_to_cpu(READ_ONCE(*cqe)));
+
+ return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
+}
+
+static void notify_cq(struct erdma_cq *cq, u8 solicited)
+{
+ u64 db_data =
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, (cq->kern_cq.notify_cnt)) |
+ FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+		FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
+
+ *cq->kern_cq.db_record = db_data;
+ writeq(db_data, cq->kern_cq.db);
+}
+
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);
+
+ notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
+ ret = 1;
+
+ cq->kern_cq.notify_cnt++;
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);
+
+ return ret;
+}
+
+static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
+ [ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_READ] = IB_WC_RDMA_READ,
+ [ERDMA_OP_SEND] = IB_WC_SEND,
+ [ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
+ [ERDMA_OP_RECEIVE] = IB_WC_RECV,
+ [ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
+ [ERDMA_OP_RECV_INV] = IB_WC_RECV,
+ [ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
+ [ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
+ [ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
+ [ERDMA_OP_REG_MR] = IB_WC_REG_MR,
+ [ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
+};
+
+static const struct {
+ enum erdma_wc_status erdma;
+ enum ib_wc_status base;
+ enum erdma_vendor_err vendor;
+} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
+ { ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_INVALID_RQE },
+ { ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG },
+ { ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
+ { ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
+ { ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD },
+ { ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR },
+ { ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
+ ERDMA_WC_VENDOR_INVALID_SQE },
+ { ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_ZERO_ORD },
+ { ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG },
+ { ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
+ { ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
+ { ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD },
+ { ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR },
+ { ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
+};
+
+#define ERDMA_POLLCQ_NO_QP 1
+
+static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+ u8 opcode, syndrome, qtype;
+ struct erdma_kqp *kern_qp;
+ struct erdma_cqe *cqe;
+ struct erdma_qp *qp;
+ u16 wqe_idx, depth;
+ u32 qpn, cqe_hdr;
+ u64 *id_table;
+ u64 *wqe_hdr;
+
+ cqe = get_next_valid_cqe(cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cq->kern_cq.ci++;
+
+ /* cqbuf should be ready when we poll */
+ dma_rmb();
+
+ qpn = be32_to_cpu(cqe->qpn);
+ wqe_idx = be32_to_cpu(cqe->qe_idx);
+ cqe_hdr = be32_to_cpu(cqe->hdr);
+
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ return ERDMA_POLLCQ_NO_QP;
+
+ kern_qp = &qp->kern_qp;
+
+ qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
+ syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
+ opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);
+
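+	/*
+	 * SQ completions advance the consumer index by the WQEBB count
+	 * encoded in the WQE header, since send WQEs span a variable
+	 * number of 32-byte basic blocks; RQ entries are fixed-size.
+	 */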
+ if (qtype == ERDMA_CQE_QTYPE_SQ) {
+ id_table = kern_qp->swr_tbl;
+ depth = qp->attrs.sq_size;
+ wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ kern_qp->sq_ci =
+ FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
+ wqe_idx + 1;
+ } else {
+ id_table = kern_qp->rwr_tbl;
+ depth = qp->attrs.rq_size;
+ }
+ wc->wr_id = id_table[wqe_idx & (depth - 1)];
+ wc->byte_len = be32_to_cpu(cqe->size);
+
+ wc->wc_flags = 0;
+
+ wc->opcode = wc_mapping_table[opcode];
+ if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ } else if (opcode == ERDMA_OP_RECV_INV) {
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+
+ if (syndrome >= ERDMA_NUM_WC_STATUS)
+ syndrome = ERDMA_WC_GENERAL_ERR;
+
+ wc->status = map_cqe_status[syndrome].base;
+ wc->vendor_err = map_cqe_status[syndrome].vendor;
+ wc->qp = &qp->ibqp;
+
+ return 0;
+}
+
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long flags;
+ int npolled, ret;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ for (npolled = 0; npolled < num_entries;) {
+ ret = erdma_poll_one_cqe(cq, wc + npolled);
+
+		if (ret == -EAGAIN) /* no new CQEs received. */
+ break;
+ else if (ret) /* ignore invalid CQEs. */
+ continue;
+
+ npolled++;
+ }
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+
+ return npolled;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
new file mode 100644
index 000000000000..8f2d094e0227
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "erdma.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+#define MAX_POLL_CHUNK_SIZE 16
+
+void notify_eq(struct erdma_eq *eq)
+{
+ u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
+ FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
+
+ *eq->db_record = db_data;
+ writeq(db_data, eq->db_addr);
+
+ atomic64_inc(&eq->notify_num);
+}
+
+void *get_next_valid_eqe(struct erdma_eq *eq)
+{
+ u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));
+
+ return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
+}
+
+void erdma_aeq_event_handler(struct erdma_dev *dev)
+{
+ struct erdma_aeqe *aeqe;
+ u32 cqn, qpn;
+ struct erdma_qp *qp;
+ struct erdma_cq *cq;
+ struct ib_event event;
+ u32 poll_cnt = 0;
+
+ memset(&event, 0, sizeof(event));
+
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ aeqe = get_next_valid_eqe(&dev->aeq);
+ if (!aeqe)
+ break;
+
+ dma_rmb();
+
+ dev->aeq.ci++;
+ atomic64_inc(&dev->aeq.event_num);
+ poll_cnt++;
+
+ if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
+ le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
+ cqn = le32_to_cpu(aeqe->event_data0);
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ event.device = cq->ibcq.device;
+ event.element.cq = &cq->ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ if (cq->ibcq.event_handler)
+ cq->ibcq.event_handler(&event,
+ cq->ibcq.cq_context);
+ } else {
+ qpn = le32_to_cpu(aeqe->event_data0);
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ continue;
+
+ event.device = qp->ibqp.device;
+ event.element.qp = &qp->ibqp;
+ event.event = IB_EVENT_QP_FATAL;
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&event,
+ qp->ibqp.qp_context);
+ }
+ }
+
+ notify_eq(&dev->aeq);
+}
+
+int erdma_aeq_init(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+ u32 buf_size;
+
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ buf_size = eq->depth << EQE_SHIFT;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+
+ eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
+ eq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+void erdma_aeq_destroy(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
+ eq->qbuf_dma_addr);
+}
+
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
+{
+ struct erdma_dev *dev = ceq_cb->dev;
+ struct erdma_cq *cq;
+ u32 poll_cnt = 0;
+ u64 *ceqe;
+ int cqn;
+
+ if (!ceq_cb->ready)
+ return;
+
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ ceqe = get_next_valid_eqe(&ceq_cb->eq);
+ if (!ceqe)
+ break;
+
+ dma_rmb();
+ ceq_cb->eq.ci++;
+ poll_cnt++;
+ cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));
+
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res))
+ cq->kern_cq.cmdsn++;
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ }
+
+ notify_eq(&ceq_cb->eq);
+}
+
+static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
+{
+ struct erdma_eq_cb *ceq_cb = data;
+
+ tasklet_schedule(&ceq_cb->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void erdma_intr_ceq_task(unsigned long data)
+{
+ erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
+}
+
+static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+ int err;
+
+ snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
+ pci_name(dev->pdev));
+ eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);
+
+ tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
+ (unsigned long)&dev->ceqs[ceqn]);
+
+ cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
+ &eqc->irq.affinity_hint_mask);
+
+ err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
+ eqc->irq.name, eqc);
+ if (err) {
+ dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
+ return err;
+ }
+
+ irq_set_affinity_hint(eqc->irq.msix_vector,
+ &eqc->irq.affinity_hint_mask);
+
+ return 0;
+}
+
+static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+
+ irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
+ free_irq(eqc->irq.msix_vector, eqc);
+}
+
+static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
+{
+ struct erdma_cmdq_create_eq_req req;
+ dma_addr_t db_info_dma_addr;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CREATE_EQ);
+ req.eqn = eqn;
+ req.depth = ilog2(eq->depth);
+ req.qbuf_addr = eq->qbuf_dma_addr;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ /* Vector index is the same as EQN. */
+ req.vector_idx = eqn;
+ db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
+ req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
+ req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
+ sizeof(struct erdma_cmdq_create_eq_req),
+ NULL, NULL);
+}
+
+static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+ int ret;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ eq->db_addr =
+ (u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
+ (ceqn + 1) * ERDMA_DB_SIZE);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+ eq->ci = 0;
+ dev->ceqs[ceqn].dev = dev;
+
+	/* CEQs are indexed from 1; 0 is reserved for the CMDQ EQ. */
+ ret = create_eq_cmd(dev, ceqn + 1, eq);
+ dev->ceqs[ceqn].ready = ret ? false : true;
+
+ return ret;
+}
+
+static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+ struct erdma_cmdq_destroy_eq_req req;
+ int err;
+
+ dev->ceqs[ceqn].ready = 0;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_DESTROY_EQ);
+	/* CEQs are indexed from 1; 0 is reserved for the CMDQ EQ. */
+ req.eqn = ceqn + 1;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ req.vector_idx = ceqn + 1;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return;
+
+ dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
+ eq->qbuf_dma_addr);
+}
+
+int erdma_ceqs_init(struct erdma_dev *dev)
+{
+ u32 i, j;
+ int err;
+
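+	/*
+	 * MSI-X vector 0 is dedicated to the CMDQ EQ, so the remaining
+	 * irq_num - 1 vectors each serve one completion EQ.
+	 */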
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ err = erdma_ceq_init_one(dev, i);
+ if (err)
+ goto out_err;
+
+ err = erdma_set_ceq_irq(dev, i);
+ if (err) {
+ erdma_ceq_uninit_one(dev, i);
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ for (j = 0; j < i; j++) {
+ erdma_free_ceq_irq(dev, j);
+ erdma_ceq_uninit_one(dev, j);
+ }
+
+ return err;
+}
+
+void erdma_ceqs_uninit(struct erdma_dev *dev)
+{
+ u32 i;
+
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ erdma_free_ceq_irq(dev, i);
+ erdma_ceq_uninit_one(dev, i);
+ }
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
new file mode 100644
index 000000000000..b210c49c669f
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_HW_H__
+#define __ERDMA_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* PCIe device related definition. */
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
+#define ERDMA_PCI_WIDTH 64
+#define ERDMA_FUNC_BAR 0
+#define ERDMA_MISX_BAR 2
+
+#define ERDMA_BAR_MASK (BIT(ERDMA_FUNC_BAR) | BIT(ERDMA_MISX_BAR))
+
+/* MSI-X related. */
+#define ERDMA_NUM_MSIX_VEC 32U
+#define ERDMA_MSIX_VECTOR_CMDQ 0
+
+/* PCIe Bar0 Registers. */
+#define ERDMA_REGS_VERSION_REG 0x0
+#define ERDMA_REGS_DEV_CTRL_REG 0x10
+#define ERDMA_REGS_DEV_ST_REG 0x14
+#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
+#define ERDMA_REGS_NETDEV_MAC_H_REG 0x1C
+#define ERDMA_REGS_CMDQ_SQ_ADDR_L_REG 0x20
+#define ERDMA_REGS_CMDQ_SQ_ADDR_H_REG 0x24
+#define ERDMA_REGS_CMDQ_CQ_ADDR_L_REG 0x28
+#define ERDMA_REGS_CMDQ_CQ_ADDR_H_REG 0x2C
+#define ERDMA_REGS_CMDQ_DEPTH_REG 0x30
+#define ERDMA_REGS_CMDQ_EQ_DEPTH_REG 0x34
+#define ERDMA_REGS_CMDQ_EQ_ADDR_L_REG 0x38
+#define ERDMA_REGS_CMDQ_EQ_ADDR_H_REG 0x3C
+#define ERDMA_REGS_AEQ_ADDR_L_REG 0x40
+#define ERDMA_REGS_AEQ_ADDR_H_REG 0x44
+#define ERDMA_REGS_AEQ_DEPTH_REG 0x48
+#define ERDMA_REGS_GRP_NUM_REG 0x4c
+#define ERDMA_REGS_AEQ_DB_REG 0x50
+#define ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG 0x60
+#define ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG 0x68
+#define ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG 0x70
+#define ERDMA_AEQ_DB_HOST_ADDR_REG 0x78
+#define ERDMA_REGS_STATS_TSO_IN_PKTS_REG 0x80
+#define ERDMA_REGS_STATS_TSO_OUT_PKTS_REG 0x88
+#define ERDMA_REGS_STATS_TSO_OUT_BYTES_REG 0x90
+#define ERDMA_REGS_STATS_TX_DROP_PKTS_REG 0x98
+#define ERDMA_REGS_STATS_TX_BPS_METER_DROP_PKTS_REG 0xa0
+#define ERDMA_REGS_STATS_TX_PPS_METER_DROP_PKTS_REG 0xa8
+#define ERDMA_REGS_STATS_RX_PKTS_REG 0xc0
+#define ERDMA_REGS_STATS_RX_BYTES_REG 0xc8
+#define ERDMA_REGS_STATS_RX_DROP_PKTS_REG 0xd0
+#define ERDMA_REGS_STATS_RX_BPS_METER_DROP_PKTS_REG 0xd8
+#define ERDMA_REGS_STATS_RX_PPS_METER_DROP_PKTS_REG 0xe0
+#define ERDMA_REGS_CEQ_DB_BASE_REG 0x100
+#define ERDMA_CMDQ_SQDB_REG 0x200
+#define ERDMA_CMDQ_CQDB_REG 0x300
+
+/* DEV_CTRL_REG details. */
+#define ERDMA_REG_DEV_CTRL_RESET_MASK 0x00000001
+#define ERDMA_REG_DEV_CTRL_INIT_MASK 0x00000002
+
+/* DEV_ST_REG details. */
+#define ERDMA_REG_DEV_ST_RESET_DONE_MASK 0x00000001U
+#define ERDMA_REG_DEV_ST_INIT_DONE_MASK 0x00000002U
+
+/* eRDMA PCIe DBs definition. */
+#define ERDMA_BAR_DB_SPACE_BASE 4096
+
+#define ERDMA_BAR_SQDB_SPACE_OFFSET ERDMA_BAR_DB_SPACE_BASE
+#define ERDMA_BAR_SQDB_SPACE_SIZE (384 * 1024)
+
+#define ERDMA_BAR_RQDB_SPACE_OFFSET \
+ (ERDMA_BAR_SQDB_SPACE_OFFSET + ERDMA_BAR_SQDB_SPACE_SIZE)
+#define ERDMA_BAR_RQDB_SPACE_SIZE (96 * 1024)
+
+#define ERDMA_BAR_CQDB_SPACE_OFFSET \
+ (ERDMA_BAR_RQDB_SPACE_OFFSET + ERDMA_BAR_RQDB_SPACE_SIZE)
+
+/* Doorbell page resources related. */
+/*
+ * At most 3072 direct SQEs (dWQEs) may be issued in parallel per device;
+ * hardware organizes them into 24 groups of 128 credits each.
+ */
+#define ERDMA_DWQE_MAX_GRP_CNT 24
+#define ERDMA_DWQE_NUM_PER_GRP 128
+
+#define ERDMA_DWQE_TYPE0_CNT 64
+#define ERDMA_DWQE_TYPE1_CNT 496
+/* type1 DB contains 2 DBs, takes 256Byte. */
+#define ERDMA_DWQE_TYPE1_CNT_PER_PAGE 16
+
+#define ERDMA_SDB_SHARED_PAGE_INDEX 95
+
+/* Doorbell related. */
+#define ERDMA_DB_SIZE 8
+
+#define ERDMA_CQDB_IDX_MASK GENMASK_ULL(63, 56)
+#define ERDMA_CQDB_CQN_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CQDB_ARM_MASK BIT_ULL(31)
+#define ERDMA_CQDB_SOL_MASK BIT_ULL(30)
+#define ERDMA_CQDB_CMDSN_MASK GENMASK_ULL(29, 28)
+#define ERDMA_CQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_EQDB_ARM_MASK BIT(31)
+#define ERDMA_EQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
+
+/* WQE related. */
+#define EQE_SIZE 16
+#define EQE_SHIFT 4
+#define RQE_SIZE 32
+#define RQE_SHIFT 5
+#define CQE_SIZE 32
+#define CQE_SHIFT 5
+#define SQEBB_SIZE 32
+#define SQEBB_SHIFT 5
+#define SQEBB_MASK (~(SQEBB_SIZE - 1))
+#define SQEBB_ALIGN(size) (((size) + SQEBB_SIZE - 1) & SQEBB_MASK)
+#define SQEBB_COUNT(size) (SQEBB_ALIGN(size) >> SQEBB_SHIFT)
+
+#define ERDMA_MAX_SQE_SIZE 128
+#define ERDMA_MAX_WQEBB_PER_SQE 4
+
+/* CMDQ related. */
+#define ERDMA_CMDQ_MAX_OUTSTANDING 128
+#define ERDMA_CMDQ_SQE_SIZE 64
+
+/* cmdq sub module definition. */
+enum CMDQ_WQE_SUB_MOD {
+ CMDQ_SUBMOD_RDMA = 0,
+ CMDQ_SUBMOD_COMMON = 1
+};
+
+enum CMDQ_RDMA_OPCODE {
+ CMDQ_OPCODE_QUERY_DEVICE = 0,
+ CMDQ_OPCODE_CREATE_QP = 1,
+ CMDQ_OPCODE_DESTROY_QP = 2,
+ CMDQ_OPCODE_MODIFY_QP = 3,
+ CMDQ_OPCODE_CREATE_CQ = 4,
+ CMDQ_OPCODE_DESTROY_CQ = 5,
+ CMDQ_OPCODE_REG_MR = 8,
+ CMDQ_OPCODE_DEREG_MR = 9
+};
+
+enum CMDQ_COMMON_OPCODE {
+ CMDQ_OPCODE_CREATE_EQ = 0,
+ CMDQ_OPCODE_DESTROY_EQ = 1,
+ CMDQ_OPCODE_QUERY_FW_INFO = 2,
+};
+
+/* cmdq-SQE HDR */
+#define ERDMA_CMD_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK GENMASK_ULL(47, 32)
+#define ERDMA_CMD_HDR_SUB_MOD_MASK GENMASK_ULL(25, 24)
+#define ERDMA_CMD_HDR_OPCODE_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+struct erdma_cmdq_destroy_cq_req {
+ u64 hdr;
+ u32 cqn;
+};
+
+#define ERDMA_EQ_TYPE_AEQ 0
+#define ERDMA_EQ_TYPE_CEQ 1
+
+struct erdma_cmdq_create_eq_req {
+ u64 hdr;
+ u64 qbuf_addr;
+ u8 vector_idx;
+ u8 eqn;
+ u8 depth;
+ u8 qtype;
+ u32 db_dma_addr_l;
+ u32 db_dma_addr_h;
+};
+
+struct erdma_cmdq_destroy_eq_req {
+ u64 hdr;
+ u64 rsvd0;
+ u8 vector_idx;
+ u8 eqn;
+ u8 rsvd1;
+ u8 qtype;
+};
+
+/* create_cq cfg0 */
+#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
+#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
+#define ERDMA_CMD_CREATE_CQ_CQN_MASK GENMASK(19, 0)
+
+/* create_cq cfg1 */
+#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
+#define ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK BIT(15)
+#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)
+
+struct erdma_cmdq_create_cq_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 qbuf_addr_l;
+ u32 qbuf_addr_h;
+ u32 cfg1;
+ u64 cq_db_info_addr;
+ u32 first_page_offset;
+};
+
+/* regmr/deregmr cfg0 */
+#define ERDMA_CMD_MR_VALID_MASK BIT(31)
+#define ERDMA_CMD_MR_KEY_MASK GENMASK(27, 20)
+#define ERDMA_CMD_MR_MPT_IDX_MASK GENMASK(19, 0)
+
+/* regmr cfg1 */
+#define ERDMA_CMD_REGMR_PD_MASK GENMASK(31, 12)
+#define ERDMA_CMD_REGMR_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_CMD_REGMR_RIGHT_MASK GENMASK(5, 2)
+#define ERDMA_CMD_REGMR_ACC_MODE_MASK GENMASK(1, 0)
+
+/* regmr cfg2 */
+#define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27)
+#define ERDMA_CMD_REGMR_MTT_TYPE_MASK GENMASK(21, 20)
+#define ERDMA_CMD_REGMR_MTT_CNT_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_reg_mr_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u64 start_va;
+ u32 size;
+ u32 cfg2;
+ u64 phy_addr[4];
+};
+
+struct erdma_cmdq_dereg_mr_req {
+ u64 hdr;
+ u32 cfg;
+};
+
+/* modify qp cfg */
+#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
+#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
+#define ERDMA_CMD_MODIFY_QP_QPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_modify_qp_req {
+ u64 hdr;
+ u32 cfg;
+ u32 cookie;
+ __be32 dip;
+ __be32 sip;
+ __be16 sport;
+ __be16 dport;
+ u32 send_nxt;
+ u32 recv_nxt;
+};
+
+/* create qp cfg0 */
+#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
+
+/* create qp cfg1 */
+#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
+
+/* create qp cqn_mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
+#define ERDMA_CMD_CREATE_QP_CQN_MASK GENMASK(23, 0)
+
+/* create qp mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK GENMASK(31, 12)
+#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
+#define ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK BIT(0)
+
+#define ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK GENMASK_ULL(31, 0)
+
+struct erdma_cmdq_create_qp_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 sq_cqn_mtt_cfg;
+ u32 rq_cqn_mtt_cfg;
+ u64 sq_buf_addr;
+ u64 rq_buf_addr;
+ u32 sq_mtt_cfg;
+ u32 rq_mtt_cfg;
+ u64 sq_db_info_dma_addr;
+ u64 rq_db_info_dma_addr;
+};
+
+struct erdma_cmdq_destroy_qp_req {
+ u64 hdr;
+ u32 qpn;
+};
+
+/* cap qword 0 definition */
+#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
+#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
+
+/* cap qword 1 definition */
+#define ERDMA_CMD_DEV_CAP_DMA_LOCAL_KEY_MASK GENMASK_ULL(63, 32)
+#define ERDMA_CMD_DEV_CAP_DEFAULT_CC_MASK GENMASK_ULL(31, 28)
+#define ERDMA_CMD_DEV_CAP_QBLOCK_MASK GENMASK_ULL(27, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_MW_MASK GENMASK_ULL(7, 0)
+
+#define ERDMA_NQP_PER_QBLOCK 1024
+
+#define ERDMA_CMD_INFO0_FW_VER_MASK GENMASK_ULL(31, 0)
+
+/* CQE hdr */
+#define ERDMA_CQE_HDR_OWNER_MASK BIT(31)
+#define ERDMA_CQE_HDR_OPCODE_MASK GENMASK(23, 16)
+#define ERDMA_CQE_HDR_QTYPE_MASK GENMASK(15, 8)
+#define ERDMA_CQE_HDR_SYNDROME_MASK GENMASK(7, 0)
+
+#define ERDMA_CQE_QTYPE_SQ 0
+#define ERDMA_CQE_QTYPE_RQ 1
+#define ERDMA_CQE_QTYPE_CMDQ 2
+
+struct erdma_cqe {
+ __be32 hdr;
+ __be32 qe_idx;
+ __be32 qpn;
+ union {
+ __le32 imm_data;
+ __be32 inv_rkey;
+ };
+ __be32 size;
+ __be32 rsvd[3];
+};
+
+struct erdma_sge {
+ __aligned_le64 laddr;
+ __le32 length;
+ __le32 lkey;
+};
+
+/* Receive Queue Element */
+struct erdma_rqe {
+ __le16 qe_idx;
+ __le16 rsvd0;
+ __le32 qpn;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ __le64 to;
+ __le32 length;
+ __le32 stag;
+};
+
+/* SQE */
+#define ERDMA_SQE_HDR_SGL_LEN_MASK GENMASK_ULL(63, 56)
+#define ERDMA_SQE_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_SQE_HDR_QPN_MASK GENMASK_ULL(51, 32)
+#define ERDMA_SQE_HDR_OPCODE_MASK GENMASK_ULL(31, 27)
+#define ERDMA_SQE_HDR_DWQE_MASK BIT_ULL(26)
+#define ERDMA_SQE_HDR_INLINE_MASK BIT_ULL(25)
+#define ERDMA_SQE_HDR_FENCE_MASK BIT_ULL(24)
+#define ERDMA_SQE_HDR_SE_MASK BIT_ULL(23)
+#define ERDMA_SQE_HDR_CE_MASK BIT_ULL(22)
+#define ERDMA_SQE_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+/* REG MR attrs */
+#define ERDMA_SQE_MR_MODE_MASK GENMASK(1, 0)
+#define ERDMA_SQE_MR_ACCESS_MASK GENMASK(5, 2)
+#define ERDMA_SQE_MR_MTT_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_SQE_MR_MTT_CNT_MASK GENMASK(31, 12)
+
+struct erdma_write_sqe {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+
+ __le32 rsvd;
+
+ struct erdma_sge sgl[0];
+};
+
+struct erdma_send_sqe {
+ __le64 hdr;
+ union {
+ __be32 imm_data;
+ __le32 invalid_stag;
+ };
+
+ __le32 length;
+ struct erdma_sge sgl[0];
+};
+
+struct erdma_readreq_sqe {
+ __le64 hdr;
+ __le32 invalid_stag;
+ __le32 length;
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+ __le32 rsvd;
+};
+
+struct erdma_reg_mr_sqe {
+ __le64 hdr;
+ __le64 addr;
+ __le32 length;
+ __le32 stag;
+ __le32 attrs;
+ __le32 rsvd;
+};
+
+/* EQ related. */
+#define ERDMA_DEFAULT_EQ_DEPTH 256
+
+/* ceqe */
+#define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
+#define ERDMA_CEQE_HDR_PI_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CEQE_HDR_O_MASK BIT_ULL(31)
+#define ERDMA_CEQE_HDR_CQN_MASK GENMASK_ULL(19, 0)
+
+/* aeqe */
+#define ERDMA_AEQE_HDR_O_MASK BIT(31)
+#define ERDMA_AEQE_HDR_TYPE_MASK GENMASK(23, 16)
+#define ERDMA_AEQE_HDR_SUBTYPE_MASK GENMASK(7, 0)
+
+#define ERDMA_AE_TYPE_QP_FATAL_EVENT 0
+#define ERDMA_AE_TYPE_QP_ERQ_ERR_EVENT 1
+#define ERDMA_AE_TYPE_ACC_ERR_EVENT 2
+#define ERDMA_AE_TYPE_CQ_ERR 3
+#define ERDMA_AE_TYPE_OTHER_ERROR 4
+
+struct erdma_aeqe {
+ __le32 hdr;
+ __le32 event_data0;
+ __le32 event_data1;
+ __le32 rsvd;
+};
+
+enum erdma_opcode {
+ ERDMA_OP_WRITE = 0,
+ ERDMA_OP_READ = 1,
+ ERDMA_OP_SEND = 2,
+ ERDMA_OP_SEND_WITH_IMM = 3,
+
+ ERDMA_OP_RECEIVE = 4,
+ ERDMA_OP_RECV_IMM = 5,
+ ERDMA_OP_RECV_INV = 6,
+
+ ERDMA_OP_REQ_ERR = 7,
+ ERDMA_OP_READ_RESPONSE = 8,
+ ERDMA_OP_WRITE_WITH_IMM = 9,
+
+ ERDMA_OP_RECV_ERR = 10,
+
+ ERDMA_OP_INVALIDATE = 11,
+ ERDMA_OP_RSP_SEND_IMM = 12,
+ ERDMA_OP_SEND_WITH_INV = 13,
+
+ ERDMA_OP_REG_MR = 14,
+ ERDMA_OP_LOCAL_INV = 15,
+ ERDMA_OP_READ_WITH_INV = 16,
+ ERDMA_NUM_OPCODES = 17,
+ ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
+};
+
+enum erdma_wc_status {
+ ERDMA_WC_SUCCESS = 0,
+ ERDMA_WC_GENERAL_ERR = 1,
+ ERDMA_WC_RECV_WQE_FORMAT_ERR = 2,
+ ERDMA_WC_RECV_STAG_INVALID_ERR = 3,
+ ERDMA_WC_RECV_ADDR_VIOLATION_ERR = 4,
+ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR = 5,
+ ERDMA_WC_RECV_PDID_ERR = 6,
+ ERDMA_WC_RECV_WARRPING_ERR = 7,
+ ERDMA_WC_SEND_WQE_FORMAT_ERR = 8,
+ ERDMA_WC_SEND_WQE_ORD_EXCEED = 9,
+ ERDMA_WC_SEND_STAG_INVALID_ERR = 10,
+ ERDMA_WC_SEND_ADDR_VIOLATION_ERR = 11,
+ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR = 12,
+ ERDMA_WC_SEND_PDID_ERR = 13,
+ ERDMA_WC_SEND_WARRPING_ERR = 14,
+ ERDMA_WC_FLUSH_ERR = 15,
+ ERDMA_WC_RETRY_EXC_ERR = 16,
+ ERDMA_NUM_WC_STATUS
+};
+
+enum erdma_vendor_err {
+ ERDMA_WC_VENDOR_NO_ERR = 0,
+ ERDMA_WC_VENDOR_INVALID_RQE = 1,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG = 2,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION = 3,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR = 4,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD = 5,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR = 6,
+ ERDMA_WC_VENDOR_INVALID_SQE = 0x20,
+ ERDMA_WC_VENDOR_ZERO_ORD = 0x21,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG = 0x30,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION = 0x31,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR = 0x32,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD = 0x33,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR = 0x34
+};
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
new file mode 100644
index 000000000000..07e743d24847
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
+MODULE_DESCRIPTION("Alibaba elasticRDMA adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *arg)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(arg);
+ struct erdma_dev *dev = container_of(nb, struct erdma_dev, netdev_nb);
+
+	if (!dev->netdev || dev->netdev != netdev)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ dev->state = IB_PORT_ACTIVE;
+ erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
+ break;
+ case NETDEV_DOWN:
+ dev->state = IB_PORT_DOWN;
+ erdma_port_event(dev, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_REGISTER:
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_CHANGE:
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_OK;
+}
+
+static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
+{
+ struct net_device *netdev;
+ int ret = -ENODEV;
+
+	/* Already bound to a net_device, so we skip. */
+ if (dev->netdev)
+ return 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev) {
+ /*
+	 * In erdma, the paired netdev and ibdev should have the same
+	 * MAC address. erdma can read that address from its PCIe BAR
+	 * registers, but it cannot obtain a reference to the paired
+	 * netdev directly, so we traverse the netdev list here to
+	 * find it.
+ */
+ if (ether_addr_equal_unaligned(netdev->perm_addr,
+ dev->attrs.peer_addr)) {
+ ret = ib_device_set_netdev(&dev->ibdev, netdev, 1);
+ if (ret) {
+ rtnl_unlock();
+ ibdev_warn(&dev->ibdev,
+ "failed (%d) to link netdev", ret);
+ return ret;
+ }
+
+ dev->netdev = netdev;
+ break;
+ }
+ }
+
+ rtnl_unlock();
+
+ return ret;
+}
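
The pairing rule in the comment above reduces to a byte-wise compare of the
two 6-byte MAC addresses; a hypothetical standalone equivalent of what
ether_addr_equal_unaligned() checks (names here are illustrative):

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* True when the candidate netdev's permanent MAC matches the adapter's
 * peer_addr read from its BAR registers.
 */
static bool erdma_netdev_is_paired(const unsigned char *perm_addr,
				   const unsigned char *peer_addr)
{
	return memcmp(perm_addr, peer_addr, ETH_ALEN) == 0;
}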
+
+static int erdma_device_register(struct erdma_dev *dev)
+{
+ struct ib_device *ibdev = &dev->ibdev;
+ int ret;
+
+ ret = erdma_enum_and_get_netdev(dev);
+ if (ret)
+ return ret;
+
+ addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
+
+ ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "ib_register_device failed: ret = %d\n", ret);
+ return ret;
+ }
+
+ dev->netdev_nb.notifier_call = erdma_netdev_event;
+ ret = register_netdevice_notifier(&dev->netdev_nb);
+ if (ret) {
+ ibdev_err(&dev->ibdev, "failed to register notifier.\n");
+ ib_unregister_device(ibdev);
+ }
+
+ return ret;
+}
+
+static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
+{
+ struct erdma_dev *dev = data;
+
+ erdma_cmdq_completion_handler(&dev->cmdq);
+ erdma_aeq_event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void erdma_dwqe_resource_init(struct erdma_dev *dev)
+{
+ int total_pages, type0, type1;
+
+ dev->attrs.grp_num = erdma_reg_read32(dev, ERDMA_REGS_GRP_NUM_REG);
+
+ if (dev->attrs.grp_num < 4)
+ dev->attrs.disable_dwqe = true;
+ else
+ dev->attrs.disable_dwqe = false;
+
+	/* One page contains 4 groups. */
+ total_pages = dev->attrs.grp_num * 4;
+
+ if (dev->attrs.grp_num >= ERDMA_DWQE_MAX_GRP_CNT) {
+ dev->attrs.grp_num = ERDMA_DWQE_MAX_GRP_CNT;
+ type0 = ERDMA_DWQE_TYPE0_CNT;
+ type1 = ERDMA_DWQE_TYPE1_CNT / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+ } else {
+ type1 = total_pages / 3;
+ type0 = total_pages - type1 - 1;
+ }
+
+ dev->attrs.dwqe_pages = type0;
+ dev->attrs.dwqe_entries = type1 * ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+}
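
A worked example of the split above, with illustrative constants (the real
ERDMA_DWQE_* values are defined elsewhere in the driver, so the numbers
below are assumptions):

#include <stdio.h>

#define DWQE_TYPE1_CNT_PER_PAGE 16	/* assumed entries per type-1 page */

int main(void)
{
	int grp_num = 8;			/* hypothetical register value */
	int total_pages = grp_num * 4;		/* one page holds 4 groups */
	int type1 = total_pages / 3;		/* pages given to type-1 */
	int type0 = total_pages - type1 - 1;	/* rest, one page reserved */

	printf("dwqe: type0 pages=%d, type1 entries=%d\n",
	       type0, type1 * DWQE_TYPE1_CNT_PER_PAGE);
	return 0;
}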
+
+static int erdma_request_vectors(struct erdma_dev *dev)
+{
+ int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
+ int ret;
+
+ ret = pci_alloc_irq_vectors(dev->pdev, 1, expect_irq_num, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "request irq vectors failed(%d)\n",
+ ret);
+ return ret;
+ }
+ dev->attrs.irq_num = ret;
+
+ return 0;
+}
+
+static int erdma_comm_irq_init(struct erdma_dev *dev)
+{
+ snprintf(dev->comm_irq.name, ERDMA_IRQNAME_SIZE, "erdma-common@pci:%s",
+ pci_name(dev->pdev));
+ dev->comm_irq.msix_vector =
+ pci_irq_vector(dev->pdev, ERDMA_MSIX_VECTOR_CMDQ);
+
+ cpumask_set_cpu(cpumask_first(cpumask_of_pcibus(dev->pdev->bus)),
+ &dev->comm_irq.affinity_hint_mask);
+ irq_set_affinity_hint(dev->comm_irq.msix_vector,
+ &dev->comm_irq.affinity_hint_mask);
+
+ return request_irq(dev->comm_irq.msix_vector, erdma_comm_irq_handler, 0,
+ dev->comm_irq.name, dev);
+}
+
+static void erdma_comm_irq_uninit(struct erdma_dev *dev)
+{
+ irq_set_affinity_hint(dev->comm_irq.msix_vector, NULL);
+ free_irq(dev->comm_irq.msix_vector, dev);
+}
+
+static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
+{
+ int ret;
+
+ erdma_dwqe_resource_init(dev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(ERDMA_PCI_WIDTH));
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+ return 0;
+}
+
+static void erdma_device_uninit(struct erdma_dev *dev)
+{
+ u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);
+
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
+}
+
+static const struct pci_device_id erdma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
+ {}
+};
+
+static int erdma_probe_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev;
+ int bars, err;
+ u32 version;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed(%d)\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ dev = ib_alloc_device(erdma_dev, ibdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "ib_alloc_device failed\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ dev->pdev = pdev;
+ dev->attrs.numa_node = dev_to_node(&pdev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (bars != ERDMA_BAR_MASK || err) {
+ err = err ? err : -EINVAL;
+ goto err_ib_device_release;
+ }
+
+ dev->func_bar_addr = pci_resource_start(pdev, ERDMA_FUNC_BAR);
+ dev->func_bar_len = pci_resource_len(pdev, ERDMA_FUNC_BAR);
+
+ dev->func_bar =
+ devm_ioremap(&pdev->dev, dev->func_bar_addr, dev->func_bar_len);
+ if (!dev->func_bar) {
+ dev_err(&pdev->dev, "devm_ioremap failed.\n");
+ err = -EFAULT;
+ goto err_release_bars;
+ }
+
+ version = erdma_reg_read32(dev, ERDMA_REGS_VERSION_REG);
+ if (version == 0) {
+		/* A version of zero means this is a non-functional function. */
+ err = -ENODEV;
+ goto err_iounmap_func_bar;
+ }
+
+ err = erdma_device_init(dev, pdev);
+ if (err)
+ goto err_iounmap_func_bar;
+
+ err = erdma_request_vectors(dev);
+ if (err)
+ goto err_iounmap_func_bar;
+
+ err = erdma_comm_irq_init(dev);
+ if (err)
+ goto err_free_vectors;
+
+ err = erdma_aeq_init(dev);
+ if (err)
+ goto err_uninit_comm_irq;
+
+ err = erdma_cmdq_init(dev);
+ if (err)
+ goto err_uninit_aeq;
+
+ err = erdma_ceqs_init(dev);
+ if (err)
+ goto err_uninit_cmdq;
+
+ erdma_finish_cmdq_init(dev);
+
+ return 0;
+
+err_uninit_cmdq:
+ erdma_device_uninit(dev);
+ erdma_cmdq_destroy(dev);
+
+err_uninit_aeq:
+ erdma_aeq_destroy(dev);
+
+err_uninit_comm_irq:
+ erdma_comm_irq_uninit(dev);
+
+err_free_vectors:
+ pci_free_irq_vectors(dev->pdev);
+
+err_iounmap_func_bar:
+ devm_iounmap(&pdev->dev, dev->func_bar);
+
+err_release_bars:
+ pci_release_selected_regions(pdev, bars);
+
+err_ib_device_release:
+ ib_dealloc_device(&dev->ibdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void erdma_remove_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ erdma_ceqs_uninit(dev);
+
+ erdma_device_uninit(dev);
+
+ erdma_cmdq_destroy(dev);
+ erdma_aeq_destroy(dev);
+ erdma_comm_irq_uninit(dev);
+ pci_free_irq_vectors(dev->pdev);
+
+ devm_iounmap(&pdev->dev, dev->func_bar);
+ pci_release_selected_regions(pdev, ERDMA_BAR_MASK);
+
+ ib_dealloc_device(&dev->ibdev);
+
+ pci_disable_device(pdev);
+}
+
+#define ERDMA_GET_CAP(name, cap) FIELD_GET(ERDMA_CMD_DEV_CAP_##name##_MASK, cap)
+
+static int erdma_dev_attrs_init(struct erdma_dev *dev)
+{
+ int err;
+ u64 req_hdr, cap0, cap1;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_DEVICE);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1);
+ if (err)
+ return err;
+
+ dev->attrs.max_cqe = 1 << ERDMA_GET_CAP(MAX_CQE, cap0);
+ dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
+ dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
+ dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
+ dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
+ dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
+ dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
+ dev->attrs.max_mr = dev->attrs.max_qp << 1;
+ dev->attrs.max_cq = dev->attrs.max_qp << 1;
+
+ dev->attrs.max_send_wr = ERDMA_MAX_SEND_WR;
+ dev->attrs.max_ord = ERDMA_MAX_ORD;
+ dev->attrs.max_ird = ERDMA_MAX_IRD;
+ dev->attrs.max_send_sge = ERDMA_MAX_SEND_SGE;
+ dev->attrs.max_recv_sge = ERDMA_MAX_RECV_SGE;
+ dev->attrs.max_sge_rd = ERDMA_MAX_SGE_RD;
+ dev->attrs.max_pd = ERDMA_MAX_PD;
+
+ dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
+ dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_QUERY_FW_INFO);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1);
+ if (!err)
+ dev->attrs.fw_version =
+ FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
+
+ return err;
+}
+
+static int erdma_res_cb_init(struct erdma_dev *dev)
+{
+ int i, j;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++) {
+ dev->res_cb[i].next_alloc_idx = 1;
+ spin_lock_init(&dev->res_cb[i].lock);
+ dev->res_cb[i].bitmap =
+ bitmap_zalloc(dev->res_cb[i].max_cap, GFP_KERNEL);
+ if (!dev->res_cb[i].bitmap)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ bitmap_free(dev->res_cb[j].bitmap);
+
+ return -ENOMEM;
+}
+
+static void erdma_res_cb_free(struct erdma_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++)
+ bitmap_free(dev->res_cb[i].bitmap);
+}
+
+static const struct ib_device_ops erdma_device_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_ERDMA,
+ .uverbs_abi_ver = ERDMA_ABI_VERSION,
+
+ .alloc_mr = erdma_ib_alloc_mr,
+ .alloc_pd = erdma_alloc_pd,
+ .alloc_ucontext = erdma_alloc_ucontext,
+ .create_cq = erdma_create_cq,
+ .create_qp = erdma_create_qp,
+ .dealloc_pd = erdma_dealloc_pd,
+ .dealloc_ucontext = erdma_dealloc_ucontext,
+ .dereg_mr = erdma_dereg_mr,
+ .destroy_cq = erdma_destroy_cq,
+ .destroy_qp = erdma_destroy_qp,
+ .get_dma_mr = erdma_get_dma_mr,
+ .get_port_immutable = erdma_get_port_immutable,
+ .iw_accept = erdma_accept,
+ .iw_add_ref = erdma_qp_get_ref,
+ .iw_connect = erdma_connect,
+ .iw_create_listen = erdma_create_listen,
+ .iw_destroy_listen = erdma_destroy_listen,
+ .iw_get_qp = erdma_get_ibqp,
+ .iw_reject = erdma_reject,
+ .iw_rem_ref = erdma_qp_put_ref,
+ .map_mr_sg = erdma_map_mr_sg,
+ .mmap = erdma_mmap,
+ .mmap_free = erdma_mmap_free,
+ .modify_qp = erdma_modify_qp,
+ .post_recv = erdma_post_recv,
+ .post_send = erdma_post_send,
+ .poll_cq = erdma_poll_cq,
+ .query_device = erdma_query_device,
+ .query_gid = erdma_query_gid,
+ .query_port = erdma_query_port,
+ .query_qp = erdma_query_qp,
+ .req_notify_cq = erdma_req_notify_cq,
+ .reg_user_mr = erdma_reg_user_mr,
+
+ INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, erdma_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_qp, erdma_qp, ibqp),
+};
+
+static int erdma_ib_device_add(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+ struct ib_device *ibdev = &dev->ibdev;
+ u64 mac;
+ int ret;
+
+ ret = erdma_dev_attrs_init(dev);
+ if (ret)
+ return ret;
+
+ ibdev->node_type = RDMA_NODE_RNIC;
+ memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
+
+ /*
+ * Current model (one-to-one device association):
+ * One ERDMA device per net_device or, equivalently,
+ * per physical port.
+ */
+ ibdev->phys_port_cnt = 1;
+ ibdev->num_comp_vectors = dev->attrs.irq_num - 1;
+
+ ib_set_device_ops(ibdev, &erdma_device_ops);
+
+ INIT_LIST_HEAD(&dev->cep_list);
+
+ spin_lock_init(&dev->lock);
+ xa_init_flags(&dev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&dev->cq_xa, XA_FLAGS_ALLOC1);
+ dev->next_alloc_cqn = 1;
+ dev->next_alloc_qpn = 1;
+
+ ret = erdma_res_cb_init(dev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&dev->db_bitmap_lock);
+ bitmap_zero(dev->sdb_page, ERDMA_DWQE_TYPE0_CNT);
+ bitmap_zero(dev->sdb_entry, ERDMA_DWQE_TYPE1_CNT);
+
+ atomic_set(&dev->num_ctx, 0);
+
+ mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);
+ mac |= (u64)erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_H_REG) << 32;
+
+ u64_to_ether_addr(mac, dev->attrs.peer_addr);
+
+ ret = erdma_device_register(dev);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+
+ erdma_res_cb_free(dev);
+
+ return ret;
+}
+
+static void erdma_ib_device_remove(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ unregister_netdevice_notifier(&dev->netdev_nb);
+ ib_unregister_device(&dev->ibdev);
+
+ erdma_res_cb_free(dev);
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+}
+
+static int erdma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = erdma_probe_dev(pdev);
+ if (ret)
+ return ret;
+
+ ret = erdma_ib_device_add(pdev);
+ if (ret) {
+ erdma_remove_dev(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void erdma_remove(struct pci_dev *pdev)
+{
+ erdma_ib_device_remove(pdev);
+ erdma_remove_dev(pdev);
+}
+
+static struct pci_driver erdma_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = erdma_pci_tbl,
+ .probe = erdma_probe,
+ .remove = erdma_remove
+};
+
+MODULE_DEVICE_TABLE(pci, erdma_pci_tbl);
+
+static __init int erdma_init_module(void)
+{
+ int ret;
+
+ ret = erdma_cm_init();
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&erdma_pci_driver);
+ if (ret)
+ erdma_cm_exit();
+
+ return ret;
+}
+
+static void __exit erdma_exit_module(void)
+{
+ pci_unregister_driver(&erdma_pci_driver);
+
+ erdma_cm_exit();
+}
+
+module_init(erdma_init_module);
+module_exit(erdma_exit_module);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
new file mode 100644
index 000000000000..bc3ec22a62c5
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2021, Alibaba Group */
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+void erdma_qp_llp_close(struct erdma_qp *qp)
+{
+ struct erdma_qp_attrs qp_attrs;
+
+ down_write(&qp->state_lock);
+
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_RTS:
+ case ERDMA_QP_STATE_RTR:
+ case ERDMA_QP_STATE_IDLE:
+ case ERDMA_QP_STATE_TERMINATE:
+ qp_attrs.state = ERDMA_QP_STATE_CLOSING;
+ erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ break;
+ case ERDMA_QP_STATE_CLOSING:
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ break;
+ default:
+ break;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+
+ up_write(&qp->state_lock);
+}
+
+struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
+{
+ struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);
+
+ if (qp)
+ return &qp->ibqp;
+
+ return NULL;
+}
+
+static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
+ struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ int ret;
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+ struct tcp_sock *tp;
+ struct erdma_cep *cep = qp->cep;
+ struct sockaddr_storage local_addr, remote_addr;
+
+ if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
+ return -EINVAL;
+
+ if (!(mask & ERDMA_QP_ATTR_MPA))
+ return -EINVAL;
+
+ ret = getname_local(cep->sock, &local_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = getname_peer(cep->sock, &remote_addr);
+ if (ret < 0)
+ return ret;
+
+ qp->attrs.state = ERDMA_QP_STATE_RTS;
+
+ tp = tcp_sk(qp->cep->sock->sk);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
+ req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
+ req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
+ req.dport = to_sockaddr_in(remote_addr).sin_port;
+ req.sport = to_sockaddr_in(local_addr).sin_port;
+
+ req.send_nxt = tp->snd_nxt;
+	/* Reserve TCP sequence space for the MPA response on the server side. */
+ if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
+ req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
+ req.recv_nxt = tp->rcv_nxt;
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
+ struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+
+ qp->attrs.state = attrs->state;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ int drop_conn, ret = 0;
+
+ if (!mask)
+ return 0;
+
+ if (!(mask & ERDMA_QP_ATTR_STATE))
+ return 0;
+
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_IDLE:
+ case ERDMA_QP_STATE_RTR:
+ if (attrs->state == ERDMA_QP_STATE_RTS) {
+ ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ }
+ break;
+ case ERDMA_QP_STATE_RTS:
+ drop_conn = 0;
+
+ if (attrs->state == ERDMA_QP_STATE_CLOSING) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ drop_conn = 1;
+ } else if (attrs->state == ERDMA_QP_STATE_TERMINATE) {
+ qp->attrs.state = ERDMA_QP_STATE_TERMINATE;
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ drop_conn = 1;
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ drop_conn = 1;
+ }
+
+ if (drop_conn)
+ erdma_qp_cm_drop(qp);
+
+ break;
+ case ERDMA_QP_STATE_TERMINATE:
+ if (attrs->state == ERDMA_QP_STATE_ERROR)
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ break;
+ case ERDMA_QP_STATE_CLOSING:
+ if (attrs->state == ERDMA_QP_STATE_IDLE) {
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ } else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
+ return -ECONNABORTED;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
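
The switch above is a small teardown state machine. A hedged sketch that
condenses which target states it acts on from each current state (side
effects such as the stop command and the CM drop are omitted; combinations
not listed are silently ignored by the driver):

/* Returns nonzero when erdma_modify_qp_internal() takes an action for
 * the (cur, next) pair. From CLOSING, any target other than these is
 * rejected with -ECONNABORTED.
 */
enum qps { QPS_IDLE, QPS_RTR, QPS_RTS, QPS_CLOSING, QPS_TERMINATE, QPS_ERROR };

static int transition_acted_on(enum qps cur, enum qps next)
{
	switch (cur) {
	case QPS_IDLE:
	case QPS_RTR:
		return next == QPS_RTS || next == QPS_ERROR;
	case QPS_RTS:
		return next == QPS_CLOSING || next == QPS_TERMINATE ||
		       next == QPS_ERROR;
	case QPS_TERMINATE:
		return next == QPS_ERROR;
	case QPS_CLOSING:
		return next == QPS_IDLE || next == QPS_ERROR ||
		       next == QPS_CLOSING;
	default:
		return 0;
	}
}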
+
+static void erdma_qp_safe_free(struct kref *ref)
+{
+ struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
+
+ complete(&qp->safe_free);
+}
+
+void erdma_qp_put(struct erdma_qp *qp)
+{
+ WARN_ON(kref_read(&qp->ref) < 1);
+ kref_put(&qp->ref, erdma_qp_safe_free);
+}
+
+void erdma_qp_get(struct erdma_qp *qp)
+{
+ kref_get(&qp->ref);
+}
+
+static int fill_inline_data(struct erdma_qp *qp,
+ const struct ib_send_wr *send_wr, u16 wqe_idx,
+ u32 sgl_offset, __le32 *length_field)
+{
+ u32 remain_size, copy_size, data_off, bytes = 0;
+ char *data;
+ int i = 0;
+
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ while (i < send_wr->num_sge) {
+ bytes += send_wr->sg_list[i].length;
+ if (bytes > (int)ERDMA_MAX_INLINE)
+ return -EINVAL;
+
+ remain_size = send_wr->sg_list[i].length;
+ data_off = 0;
+
+ while (1) {
+ copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);
+
+ memcpy(data + sgl_offset,
+ (void *)(uintptr_t)send_wr->sg_list[i].addr +
+ data_off,
+ copy_size);
+ remain_size -= copy_size;
+ data_off += copy_size;
+ sgl_offset += copy_size;
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ if (!remain_size)
+ break;
+ }
+
+ i++;
+ }
+ *length_field = cpu_to_le32(bytes);
+
+ return bytes;
+}
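
The carry logic above (fold whole basic blocks consumed by the offset into
wqe_idx, keep only the intra-block remainder) relies on the SQ being a
power-of-two ring. A standalone sketch, assuming get_queue_entry() is the
usual mask-and-scale helper and that a SQEBB is 32 bytes (both assumptions):

#include <stdint.h>

#define SQEBB_SHIFT 5
#define SQEBB_SIZE  (1U << SQEBB_SHIFT)

/* Mask the free-running index into the ring, scale by the entry size. */
static void *get_queue_entry(void *qbuf, uint32_t idx, uint32_t depth,
			     uint32_t shift)
{
	return (char *)qbuf + ((uint64_t)(idx & (depth - 1)) << shift);
}

/* The advance step used after each partial copy in fill_inline_data(). */
static void advance(uint32_t *wqe_idx, uint32_t *sgl_offset)
{
	*wqe_idx += (*sgl_offset >> SQEBB_SHIFT);
	*sgl_offset &= (SQEBB_SIZE - 1);
}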
+
+static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
+ u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
+{
+ int i = 0;
+ u32 bytes = 0;
+ char *sgl;
+
+ if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
+ return -EINVAL;
+
+ if (sgl_offset & 0xF)
+ return -EINVAL;
+
+ while (i < send_wr->num_sge) {
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+
+ bytes += send_wr->sg_list[i].length;
+ memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
+ sizeof(struct ib_sge));
+
+ sgl_offset += sizeof(struct ib_sge);
+ i++;
+ }
+
+ *length_field = cpu_to_le32(bytes);
+ return 0;
+}
+
+static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
+ const struct ib_send_wr *send_wr)
+{
+ u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
+ u32 idx = *pi & (qp->attrs.sq_size - 1);
+ enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_readreq_sqe *read_sqe;
+ struct erdma_reg_mr_sqe *regmr_sge;
+ struct erdma_write_sqe *write_sqe;
+ struct erdma_send_sqe *send_sqe;
+ struct ib_rdma_wr *rdma_wr;
+ struct erdma_mr *mr;
+ __le32 *length_field;
+ u64 wqe_hdr, *entry;
+ struct ib_sge *sge;
+ u32 attrs;
+ int ret;
+
+ entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ /* Clear the SQE header section. */
+ *entry = 0;
+
+ qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
+ flags = send_wr->send_flags;
+ wqe_hdr = FIELD_PREP(
+ ERDMA_SQE_HDR_CE_MASK,
+ ((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
+ flags & IB_SEND_SOLICITED ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
+ flags & IB_SEND_FENCE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
+ flags & IB_SEND_INLINE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));
+
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ hw_op = ERDMA_OP_WRITE;
+ if (op == IB_WR_RDMA_WRITE_WITH_IMM)
+ hw_op = ERDMA_OP_WRITE_WITH_IMM;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ write_sqe = (struct erdma_write_sqe *)entry;
+
+ write_sqe->imm_data = send_wr->ex.imm_data;
+ write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
+ write_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
+ write_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));
+
+ length_field = &write_sqe->length;
+ wqe_size = sizeof(struct erdma_write_sqe);
+ sgl_offset = wqe_size;
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ read_sqe = (struct erdma_readreq_sqe *)entry;
+ if (unlikely(send_wr->num_sge != 1))
+ return -EINVAL;
+ hw_op = ERDMA_OP_READ;
+ if (op == IB_WR_RDMA_READ_WITH_INV) {
+ hw_op = ERDMA_OP_READ_WITH_INV;
+ read_sqe->invalid_stag =
+ cpu_to_le32(send_wr->ex.invalidate_rkey);
+ }
+
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
+ read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
+ read_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
+ read_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));
+
+ sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ sge->addr = rdma_wr->remote_addr;
+ sge->lkey = rdma_wr->rkey;
+ sge->length = send_wr->sg_list[0].length;
+ wqe_size = sizeof(struct erdma_readreq_sqe) +
+ send_wr->num_sge * sizeof(struct ib_sge);
+
+ goto out;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ send_sqe = (struct erdma_send_sqe *)entry;
+ hw_op = ERDMA_OP_SEND;
+ if (op == IB_WR_SEND_WITH_IMM) {
+ hw_op = ERDMA_OP_SEND_WITH_IMM;
+ send_sqe->imm_data = send_wr->ex.imm_data;
+ } else if (op == IB_WR_SEND_WITH_INV) {
+ hw_op = ERDMA_OP_SEND_WITH_INV;
+ send_sqe->invalid_stag =
+ cpu_to_le32(send_wr->ex.invalidate_rkey);
+ }
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ length_field = &send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe);
+ sgl_offset = wqe_size;
+
+ break;
+ case IB_WR_REG_MR:
+ wqe_hdr |=
+ FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ mr = to_emr(reg_wr(send_wr)->mr);
+
+ mr->access = ERDMA_MR_ACC_LR |
+ to_erdma_access_flags(reg_wr(send_wr)->access);
+ regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
+ regmr_sge->length = cpu_to_le32(mr->ibmr.length);
+ regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
+ attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
+ FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
+ FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+ mr->mem.mtt_nents);
+
+ if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
+			/* Copy MTT entries into the SQE to speed up posting. */
+ memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT),
+ mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
+ wqe_size = sizeof(struct erdma_reg_mr_sqe) +
+ MTT_SIZE(mr->mem.mtt_nents);
+ } else {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ }
+
+ regmr_sge->attrs = cpu_to_le32(attrs);
+ goto out;
+ case IB_WR_LOCAL_INV:
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_LOCAL_INV);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ goto out;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (flags & IB_SEND_INLINE) {
+ ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
+ length_field);
+ if (ret < 0)
+ return -EINVAL;
+ wqe_size += ret;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
+ } else {
+ ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
+ if (ret)
+ return -EINVAL;
+ wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
+ send_wr->num_sge);
+ }
+
+out:
+ wqebb_cnt = SQEBB_COUNT(wqe_size);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
+ *pi += wqebb_cnt;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);
+
+ *entry = wqe_hdr;
+
+ return 0;
+}
+
+static void kick_sq_db(struct erdma_qp *qp, u16 pi)
+{
+ u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
+ FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
+
+ *(u64 *)qp->kern_qp.sq_db_info = db_data;
+ writeq(db_data, qp->kern_qp.hw_sq_db);
+}
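
kick_sq_db() writes one 64-bit doorbell value twice: into the host-memory
doorbell record and then to the MMIO register. A hedged userspace sketch of
the composition, reusing the SQE header masks (the volatile store stands in
for writeq()):

#include <stdint.h>

#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define ERDMA_SQE_HDR_QPN_MASK		(0xFFFFFULL << 32)	/* bits 51:32 */
#define ERDMA_SQE_HDR_WQEBB_INDEX_MASK	0xFFFFULL		/* bits 15:0 */

static void kick_sq_db(uint64_t *db_record, volatile uint64_t *hw_db,
		       uint32_t qpn, uint16_t pi)
{
	uint64_t db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, qpn) |
			   FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);

	*db_record = db_data;	/* doorbell record in DMA memory */
	*hw_db = db_data;	/* MMIO doorbell (writeq in the driver) */
}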
+
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ int ret = 0;
+ const struct ib_send_wr *wr = send_wr;
+ unsigned long flags;
+ u16 sq_pi;
+
+ if (!send_wr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&qp->lock, flags);
+ sq_pi = qp->kern_qp.sq_pi;
+
+ while (wr) {
+ if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
+ ret = -ENOMEM;
+ *bad_send_wr = send_wr;
+ break;
+ }
+
+ ret = erdma_push_one_sqe(qp, &sq_pi, wr);
+ if (ret) {
+ *bad_send_wr = wr;
+ break;
+ }
+ qp->kern_qp.sq_pi = sq_pi;
+ kick_sq_db(qp, sq_pi);
+
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ return ret;
+}
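
The fullness test above exploits free-running 16-bit counters: sq_pi and
sq_ci increment without masking, so their unsigned difference is the queue
occupancy even across wraparound. A quick standalone check:

#include <assert.h>
#include <stdint.h>

static uint16_t occupancy(uint16_t pi, uint16_t ci)
{
	return (uint16_t)(pi - ci);
}

int main(void)
{
	assert(occupancy(10, 5) == 5);
	/* ci close to the wrap point, pi already wrapped past zero. */
	assert(occupancy(3, 65533) == 6);
	return 0;
}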
+
+static int erdma_post_recv_one(struct erdma_qp *qp,
+ const struct ib_recv_wr *recv_wr)
+{
+ struct erdma_rqe *rqe =
+ get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
+ qp->attrs.rq_size, RQE_SHIFT);
+
+ rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
+ rqe->qpn = cpu_to_le32(QP_ID(qp));
+
+ if (recv_wr->num_sge == 0) {
+ rqe->length = 0;
+ } else if (recv_wr->num_sge == 1) {
+ rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
+ rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
+ rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
+ } else {
+ return -EINVAL;
+ }
+
+ *(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
+ writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
+
+ qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
+ recv_wr->wr_id;
+ qp->kern_qp.rq_pi++;
+
+ return 0;
+}
+
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr)
+{
+ const struct ib_recv_wr *wr = recv_wr;
+ struct erdma_qp *qp = to_eqp(ibqp);
+ unsigned long flags;
+	int ret = 0;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ while (wr) {
+ ret = erdma_post_recv_one(qp, wr);
+ if (ret) {
+ *bad_recv_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
new file mode 100644
index 000000000000..699bd3f59cd3
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -0,0 +1,1460 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
+{
+ struct erdma_cmdq_create_qp_req req;
+ struct erdma_pd *pd = to_epd(qp->ibqp.pd);
+ struct erdma_uqp *user_qp;
+ u64 resp0, resp1;
+ int err;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
+ ilog2(qp->attrs.sq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
+ ilog2(qp->attrs.rq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
+
+ req.sq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+ req.rq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ ERDMA_MR_INLINE_MTT);
+ req.rq_mtt_cfg = req.sq_mtt_cfg;
+
+ req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
+ req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
+ req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr +
+ (qp->attrs.sq_size << SQEBB_SHIFT);
+ req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
+ (qp->attrs.rq_size << RQE_SHIFT);
+ } else {
+ user_qp = &qp->user_qp;
+ req.sq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
+ req.sq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+
+ req.rq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
+ req.rq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg = user_qp->sq_mtt.page_offset;
+ req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->sq_mtt.mtt_nents) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ user_qp->sq_mtt.mtt_type);
+
+ req.rq_mtt_cfg = user_qp->rq_mtt.page_offset;
+ req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->rq_mtt.mtt_nents) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ user_qp->rq_mtt.mtt_type);
+
+ req.sq_buf_addr = user_qp->sq_mtt.mtt_entry[0];
+ req.rq_buf_addr = user_qp->rq_mtt.mtt_entry[0];
+
+ req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
+ req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
+ }
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
+ &resp1);
+ if (!err)
+ qp->attrs.cookie =
+ FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
+
+ return err;
+}
+
+static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
+{
+ struct erdma_cmdq_reg_mr_req req;
+ struct erdma_pd *pd = to_epd(mr->ibmr.pd);
+ u64 *phy_addr;
+ int i;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
+ FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
+ FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access) |
+ FIELD_PREP(ERDMA_CMD_REGMR_ACC_MODE_MASK, 0);
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
+ ilog2(mr->mem.page_size)) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_TYPE_MASK, mr->mem.mtt_type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);
+
+ if (mr->type == ERDMA_MR_TYPE_DMA)
+ goto post_cmd;
+
+ if (mr->type == ERDMA_MR_TYPE_NORMAL) {
+ req.start_va = mr->mem.va;
+ req.size = mr->mem.len;
+ }
+
+ if (mr->type == ERDMA_MR_TYPE_FRMR ||
+ mr->mem.mtt_type == ERDMA_MR_INDIRECT_MTT) {
+ phy_addr = req.phy_addr;
+ *phy_addr = mr->mem.mtt_entry[0];
+ } else {
+ phy_addr = req.phy_addr;
+ for (i = 0; i < mr->mem.mtt_nents; i++)
+ *phy_addr++ = mr->mem.mtt_entry[i];
+ }
+
+post_cmd:
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
+{
+ struct erdma_cmdq_create_cq_req req;
+ u32 page_size;
+ struct erdma_mem *mtt;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_CQ);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ page_size = SZ_32M;
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(page_size) - PAGE_SHIFT);
+ req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
+ req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
+
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
+ ERDMA_MR_INLINE_MTT);
+
+ req.first_page_offset = 0;
+ req.cq_db_info_addr =
+ cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
+ } else {
+ mtt = &cq->user_cq.qbuf_mtt;
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(mtt->page_size) - PAGE_SHIFT);
+ if (mtt->mtt_nents == 1) {
+ req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
+ req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+ } else {
+ req.qbuf_addr_l = lower_32_bits(mtt->mtt_entry[0]);
+ req.qbuf_addr_h = upper_32_bits(mtt->mtt_entry[0]);
+ }
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
+ mtt->mtt_nents);
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
+ mtt->mtt_type);
+
+ req.first_page_offset = mtt->page_offset;
+ req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
+ }
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
+ res_cb->next_alloc_idx);
+ if (idx == res_cb->max_cap) {
+ idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
+ if (idx == res_cb->max_cap) {
+ res_cb->next_alloc_idx = 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ return -ENOSPC;
+ }
+ }
+
+ set_bit(idx, res_cb->bitmap);
+ res_cb->next_alloc_idx = idx + 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+
+ return idx;
+}
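
erdma_alloc_idx() is a round-robin ID allocator: it scans the bitmap from
next_alloc_idx, restarts once from the beginning, and remembers where to
resume. A userspace sketch of the same idea (byte-per-bit map, no spinlock;
both simplifications are assumptions):

#define MAX_CAP 128

struct res_cb {
	unsigned char bitmap[MAX_CAP];	/* 0 = free, 1 = used */
	unsigned int next_alloc_idx;
};

static int alloc_idx(struct res_cb *cb)
{
	unsigned int i, idx;

	/* One full lap starting at the remembered position. */
	for (i = 0; i < MAX_CAP; i++) {
		idx = (cb->next_alloc_idx + i) % MAX_CAP;
		if (!cb->bitmap[idx]) {
			cb->bitmap[idx] = 1;
			cb->next_alloc_idx = idx + 1;
			return (int)idx;
		}
	}
	return -1;	/* -ENOSPC in the driver */
}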
+
+static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
+{
+ unsigned long flags;
+ u32 used;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ used = __test_and_clear_bit(idx, res_cb->bitmap);
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ WARN_ON(!used);
+}
+
+static struct rdma_user_mmap_entry *
+erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
+ u32 size, u8 mmap_flag, u64 *mmap_offset)
+{
+ struct erdma_user_mmap_entry *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ int ret;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = (u64)address;
+ entry->mmap_flag = mmap_flag;
+
+ size = PAGE_ALIGN(size);
+
+ ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
+ size);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
+ struct ib_udata *unused)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->max_mr_size = dev->attrs.max_mr_size;
+ attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
+ attr->vendor_part_id = dev->pdev->device;
+ attr->hw_ver = dev->pdev->revision;
+ attr->max_qp = dev->attrs.max_qp - 1;
+ attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
+ attr->max_qp_rd_atom = dev->attrs.max_ord;
+ attr->max_qp_init_rd_atom = dev->attrs.max_ird;
+ attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
+ attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ ibdev->local_dma_lkey = dev->attrs.local_dma_key;
+ attr->max_send_sge = dev->attrs.max_send_sge;
+ attr->max_recv_sge = dev->attrs.max_recv_sge;
+ attr->max_sge_rd = dev->attrs.max_sge_rd;
+ attr->max_cq = dev->attrs.max_cq - 1;
+ attr->max_cqe = dev->attrs.max_cqe;
+ attr->max_mr = dev->attrs.max_mr;
+ attr->max_pd = dev->attrs.max_pd;
+ attr->max_mw = dev->attrs.max_mw;
+ attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
+ attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
+ attr->fw_ver = dev->attrs.fw_version;
+
+ if (dev->netdev)
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->netdev->dev_addr);
+
+ return 0;
+}
+
+int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
+ union ib_gid *gid)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(gid, 0, sizeof(*gid));
+ ether_addr_copy(gid->raw, dev->attrs.peer_addr);
+
+ return 0;
+}
+
+int erdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+ struct net_device *ndev = dev->netdev;
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->gid_tbl_len = 1;
+ attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
+ attr->max_msg_sz = -1;
+
+ if (!ndev)
+ goto out;
+
+ ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
+ attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ if (netif_running(ndev) && netif_carrier_ok(ndev))
+ dev->state = IB_PORT_ACTIVE;
+ else
+ dev->state = IB_PORT_DOWN;
+ attr->state = dev->state;
+
+out:
+ if (dev->state == IB_PORT_ACTIVE)
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ else
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+ return 0;
+}
+
+int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *port_immutable)
+{
+ port_immutable->gid_tbl_len = 1;
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+ return 0;
+}
+
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int pdn;
+
+ pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
+ if (pdn < 0)
+ return pdn;
+
+ pd->pdn = pdn;
+
+ return 0;
+}
+
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);
+
+ return 0;
+}
+
+static int erdma_qp_validate_cap(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
+ (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
+ (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
+ (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
+ (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
+ !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int erdma_qp_validate_attr(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+
+ if (attrs->srq)
+ return -EOPNOTSUPP;
+
+ if (!attrs->send_cq || !attrs->recv_cq)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void free_kernel_qp(struct erdma_qp *qp)
+{
+ struct erdma_dev *dev = qp->dev;
+
+ vfree(qp->kern_qp.swr_tbl);
+ vfree(qp->kern_qp.rwr_tbl);
+
+ if (qp->kern_qp.sq_buf)
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
+ qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+
+ if (qp->kern_qp.rq_buf)
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
+ qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+}
+
+static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ struct erdma_kqp *kqp = &qp->kern_qp;
+ int size;
+
+ if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
+ kqp->sig_all = 1;
+
+ kqp->sq_pi = 0;
+ kqp->sq_ci = 0;
+ kqp->rq_pi = 0;
+ kqp->rq_ci = 0;
+ kqp->hw_sq_db =
+ dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
+ kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;
+
+ kqp->swr_tbl = vmalloc(qp->attrs.sq_size * sizeof(u64));
+ kqp->rwr_tbl = vmalloc(qp->attrs.rq_size * sizeof(u64));
+ if (!kqp->swr_tbl || !kqp->rwr_tbl)
+ goto err_out;
+
+ size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->sq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->sq_buf)
+ goto err_out;
+
+ size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->rq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->rq_buf)
+ goto err_out;
+
+ kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT);
+ kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT);
+
+ return 0;
+
+err_out:
+ free_kernel_qp(qp);
+ return -ENOMEM;
+}
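
init_kernel_qp() places each queue and its doorbell record in a single
DMA-coherent allocation: ring entries first, the db_info record in the
extra tail space. A sketch of the layout math (the SQEBB size and the
extra-space constant are assumptions):

#include <stdio.h>

#define SQEBB_SHIFT		5	/* assumed 32-byte SQEBB */
#define EXTRA_BUFFER_SIZE	8	/* assumed room for the db record */

int main(void)
{
	unsigned int sq_size = 256;	/* entries, power of two */
	unsigned long ring_bytes = (unsigned long)sq_size << SQEBB_SHIFT;
	unsigned long alloc_bytes = ring_bytes + EXTRA_BUFFER_SIZE;

	printf("alloc %lu bytes, db record at offset %lu\n",
	       alloc_bytes, ring_bytes);
	return 0;
}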
+
+static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
+ u64 start, u64 len, int access, u64 virt,
+ unsigned long req_page_size, u8 force_indirect_mtt)
+{
+ struct ib_block_iter biter;
+ uint64_t *phy_addr = NULL;
+ int ret = 0;
+
+ mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
+ if (IS_ERR(mem->umem)) {
+ ret = PTR_ERR(mem->umem);
+ mem->umem = NULL;
+ return ret;
+ }
+
+ mem->va = virt;
+ mem->len = len;
+ mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
+ mem->page_offset = start & (mem->page_size - 1);
+ mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
+ mem->page_cnt = mem->mtt_nents;
+
+ if (mem->page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES ||
+ force_indirect_mtt) {
+ mem->mtt_type = ERDMA_MR_INDIRECT_MTT;
+ mem->mtt_buf =
+ alloc_pages_exact(MTT_SIZE(mem->page_cnt), GFP_KERNEL);
+ if (!mem->mtt_buf) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ phy_addr = mem->mtt_buf;
+ } else {
+ mem->mtt_type = ERDMA_MR_INLINE_MTT;
+ phy_addr = mem->mtt_entry;
+ }
+
+ rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size) {
+ *phy_addr = rdma_block_iter_dma_address(&biter);
+ phy_addr++;
+ }
+
+ if (mem->mtt_type == ERDMA_MR_INDIRECT_MTT) {
+ mem->mtt_entry[0] =
+ dma_map_single(&dev->pdev->dev, mem->mtt_buf,
+ MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mem->mtt_entry[0])) {
+ free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
+ mem->mtt_buf = NULL;
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ }
+
+ return 0;
+
+error_ret:
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+
+ return ret;
+}
+
+static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
+{
+ if (mem->mtt_buf) {
+ dma_unmap_single(&dev->pdev->dev, mem->mtt_entry[0],
+ MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
+ free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
+ }
+
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+}
+
+static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
+ u64 dbrecords_va,
+ struct erdma_user_dbrecords_page **dbr_page,
+ dma_addr_t *dma_addr)
+{
+ struct erdma_user_dbrecords_page *page = NULL;
+ int rv = 0;
+
+ mutex_lock(&ctx->dbrecords_page_mutex);
+
+ list_for_each_entry(page, &ctx->dbrecords_page_list, list)
+ if (page->va == (dbrecords_va & PAGE_MASK))
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ page->va = (dbrecords_va & PAGE_MASK);
+ page->refcnt = 0;
+
+ page->umem = ib_umem_get(ctx->ibucontext.device,
+ dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
+ if (IS_ERR(page->umem)) {
+ rv = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &ctx->dbrecords_page_list);
+
+found:
+ *dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (dbrecords_va & ~PAGE_MASK);
+ *dbr_page = page;
+ page->refcnt++;
+
+out:
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+ return rv;
+}
+
+static void
+erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
+ struct erdma_user_dbrecords_page **dbr_page)
+{
+ if (!ctx || !(*dbr_page))
+ return;
+
+ mutex_lock(&ctx->dbrecords_page_mutex);
+ if (--(*dbr_page)->refcnt == 0) {
+ list_del(&(*dbr_page)->list);
+ ib_umem_release((*dbr_page)->umem);
+ kfree(*dbr_page);
+ }
+
+ *dbr_page = NULL;
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+}
+
+static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ u64 va, u32 len, u64 db_info_va)
+{
+ dma_addr_t db_info_dma_addr;
+ u32 rq_offset;
+ int ret;
+
+ if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
+ qp->attrs.rq_size * RQE_SIZE))
+ return -EINVAL;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mtt, va,
+ qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
+ (SZ_1M - SZ_4K), 1);
+ if (ret)
+ return ret;
+
+ rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
+ qp->user_qp.rq_offset = rq_offset;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+ qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
+ (SZ_1M - SZ_4K), 1);
+ if (ret)
+ goto put_sq_mtt;
+
+ ret = erdma_map_user_dbrecords(uctx, db_info_va,
+ &qp->user_qp.user_dbr_page,
+ &db_info_dma_addr);
+ if (ret)
+ goto put_rq_mtt;
+
+ qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
+ qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;
+
+ return 0;
+
+put_rq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+
+put_sq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+
+ return ret;
+}
+
+static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
+{
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+ erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
+}
+
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_ureq_create_qp ureq;
+ struct erdma_uresp_create_qp uresp;
+ int ret;
+
+ ret = erdma_qp_validate_cap(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ ret = erdma_qp_validate_attr(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ qp->scq = to_ecq(attrs->send_cq);
+ qp->rcq = to_ecq(attrs->recv_cq);
+ qp->dev = dev;
+ qp->attrs.cc = dev->attrs.cc;
+
+ init_rwsem(&qp->state_lock);
+ kref_init(&qp->ref);
+ init_completion(&qp->safe_free);
+
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+ &dev->next_alloc_qpn, GFP_KERNEL);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
+ ERDMA_MAX_WQEBB_PER_SQE);
+ qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);
+
+ if (uctx) {
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (ret)
+ goto err_out_xa;
+
+ ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
+ ureq.db_record_va);
+ if (ret)
+ goto err_out_xa;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.num_sqe = qp->attrs.sq_size;
+ uresp.num_rqe = qp->attrs.rq_size;
+ uresp.qp_id = QP_ID(qp);
+ uresp.rq_offset = qp->user_qp.rq_offset;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_out_cmd;
+	} else {
+		ret = init_kernel_qp(dev, qp, attrs);
+		if (ret)
+			goto err_out_xa;
+	}
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+ qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+
+ ret = create_qp_cmd(dev, qp);
+ if (ret)
+ goto err_out_cmd;
+
+ spin_lock_init(&qp->lock);
+
+ return 0;
+
+err_out_cmd:
+ if (uctx)
+ free_user_qp(qp, uctx);
+ else
+ free_kernel_qp(qp);
+err_out_xa:
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+err_out:
+ return ret;
+}
+
+static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
+{
+ int stag_idx;
+
+ stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
+ if (stag_idx < 0)
+ return stag_idx;
+
+	/* For now, we always leave the key field zero. */
+ *stag = (stag_idx << 8);
+
+ return 0;
+}
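
The STag layout used here puts the allocator index in the high bits and an
8-bit key in the low byte, which is why lkey >> 8 recovers the index
throughout this file. A tiny sketch:

#include <assert.h>
#include <stdint.h>

static uint32_t make_stag(uint32_t idx, uint8_t key)
{
	return (idx << 8) | key;
}

int main(void)
{
	uint32_t stag = make_stag(42, 0);	/* key is always 0 for now */

	assert((stag >> 8) == 42);	/* index, as in lkey >> 8 */
	assert((stag & 0xFF) == 0);	/* key field */
	return 0;
}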
+
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ struct erdma_mr *mr;
+ u32 stag;
+ int ret;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_DMA;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_remove_stag;
+
+ return &mr->ibmr;
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int ret;
+ u32 stag;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_FRMR;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+	/* Overwritten with the requested rights when the FRMR is posted. */
+ mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
+ ERDMA_MR_ACC_RW;
+
+ mr->mem.page_size = PAGE_SIZE; /* update it later. */
+ mr->mem.page_cnt = max_num_sg;
+ mr->mem.mtt_type = ERDMA_MR_INDIRECT_MTT;
+ mr->mem.mtt_buf =
+ alloc_pages_exact(MTT_SIZE(mr->mem.page_cnt), GFP_KERNEL);
+ if (!mr->mem.mtt_buf) {
+ ret = -ENOMEM;
+ goto out_remove_stag;
+ }
+
+ mr->mem.mtt_entry[0] =
+ dma_map_single(&dev->pdev->dev, mr->mem.mtt_buf,
+ MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mr->mem.mtt_entry[0])) {
+ ret = -ENOMEM;
+ goto out_free_mtt;
+ }
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_dma_unmap;
+
+ return &mr->ibmr;
+
+out_dma_unmap:
+ dma_unmap_single(&dev->pdev->dev, mr->mem.mtt_entry[0],
+ MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
+out_free_mtt:
+ free_pages_exact(mr->mem.mtt_buf, MTT_SIZE(mr->mem.page_cnt));
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+
+ if (mr->mem.mtt_nents >= mr->mem.page_cnt)
+ return -1;
+
+ *((u64 *)mr->mem.mtt_buf + mr->mem.mtt_nents) = addr;
+ mr->mem.mtt_nents++;
+
+ return 0;
+}
+
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+ int num;
+
+ mr->mem.mtt_nents = 0;
+
+ num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
+ erdma_set_page);
+
+ return num;
+}
+
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_udata *udata)
+{
+ struct erdma_mr *mr = NULL;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ u32 stag;
+ int ret;
+
+ if (!len || len > dev->attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
+ SZ_2G - SZ_4K, 0);
+ if (ret)
+ goto err_out_free;
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto err_out_put_mtt;
+
+ mr->ibmr.lkey = mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->mem.va = virt;
+ mr->mem.len = len;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
+ mr->valid = 1;
+ mr->type = ERDMA_MR_TYPE_NORMAL;
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto err_out_mr;
+
+ return &mr->ibmr;
+
+err_out_mr:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+err_out_put_mtt:
+ put_mtt_entries(dev, &mr->mem);
+
+err_out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibmr->device);
+ struct erdma_cmdq_dereg_mr_req req;
+ int ret;
+
+ mr = to_emr(ibmr);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DEREG_MR);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);
+
+ put_mtt_entries(dev, &mr->mem);
+
+ kfree(mr);
+ return 0;
+}
+
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ int err;
+ struct erdma_cmdq_destroy_cq_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_CQ);
+ req.cqn = cq->cqn;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return err;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ } else {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ }
+
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return 0;
+}
+
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_qp_attrs qp_attrs;
+ int err;
+ struct erdma_cmdq_destroy_qp_req req;
+
+ down_write(&qp->state_lock);
+ qp_attrs.state = ERDMA_QP_STATE_ERROR;
+ erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ up_write(&qp->state_lock);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_QP);
+ req.qpn = QP_ID(qp);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return err;
+
+ erdma_qp_put(qp);
+ wait_for_completion(&qp->safe_free);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ vfree(qp->kern_qp.swr_tbl);
+ vfree(qp->kern_qp.rwr_tbl);
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
+ qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
+ qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+ } else {
+ put_mtt_entries(dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(dev, &qp->user_qp.rq_mtt);
+ erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
+ }
+
+ if (qp->cep)
+ erdma_cep_put(qp->cep);
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+
+ return 0;
+}
+
+void erdma_qp_get_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_get(to_eqp(ibqp));
+}
+
+void erdma_qp_put_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_put(to_eqp(ibqp));
+}
+
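+/* Doorbell pages are mapped into user space as non-cached device memory. */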
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct erdma_user_mmap_entry *entry;
+ pgprot_t prot;
+ int err;
+
+ rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
+ if (!rdma_entry)
+ return -EINVAL;
+
+ entry = to_emmap(rdma_entry);
+
+ switch (entry->mmap_flag) {
+ case ERDMA_MMAP_IO_NC:
+ /* map doorbell. */
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
+ prot, rdma_entry);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
+}
+
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ kfree(entry);
+}
+
+#define ERDMA_SDB_PAGE 0
+#define ERDMA_SDB_ENTRY 1
+#define ERDMA_SDB_SHARED 2
+
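+/*
+ * SQ doorbell space is handed out in three tiers: a dedicated page (full
+ * direct-WQE support), a single entry within a type1 page, or the shared
+ * doorbell page as the final fallback.
+ */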
+static void alloc_db_resources(struct erdma_dev *dev,
+ struct erdma_ucontext *ctx)
+{
+ u32 bitmap_idx;
+ struct erdma_devattr *attrs = &dev->attrs;
+
+ if (attrs->disable_dwqe)
+ goto alloc_normal_db;
+
+ /* Try to alloc independent SDB page. */
+ spin_lock(&dev->db_bitmap_lock);
+ bitmap_idx = find_first_zero_bit(dev->sdb_page, attrs->dwqe_pages);
+ if (bitmap_idx != attrs->dwqe_pages) {
+ set_bit(bitmap_idx, dev->sdb_page);
+ spin_unlock(&dev->db_bitmap_lock);
+
+ ctx->sdb_type = ERDMA_SDB_PAGE;
+ ctx->sdb_idx = bitmap_idx;
+ ctx->sdb_page_idx = bitmap_idx;
+ ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
+ (bitmap_idx << PAGE_SHIFT);
+ ctx->sdb_page_off = 0;
+
+ return;
+ }
+
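+	/* No whole page left; try a single direct-WQE entry instead. */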
+ bitmap_idx = find_first_zero_bit(dev->sdb_entry, attrs->dwqe_entries);
+ if (bitmap_idx != attrs->dwqe_entries) {
+ set_bit(bitmap_idx, dev->sdb_entry);
+ spin_unlock(&dev->db_bitmap_lock);
+
+ ctx->sdb_type = ERDMA_SDB_ENTRY;
+ ctx->sdb_idx = bitmap_idx;
+ ctx->sdb_page_idx = attrs->dwqe_pages +
+ bitmap_idx / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+ ctx->sdb_page_off = bitmap_idx % ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+
+ ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
+ (ctx->sdb_page_idx << PAGE_SHIFT);
+
+ return;
+ }
+
+ spin_unlock(&dev->db_bitmap_lock);
+
+alloc_normal_db:
+ ctx->sdb_type = ERDMA_SDB_SHARED;
+ ctx->sdb_idx = 0;
+ ctx->sdb_page_idx = ERDMA_SDB_SHARED_PAGE_INDEX;
+ ctx->sdb_page_off = 0;
+
+ ctx->sdb = dev->func_bar_addr + (ctx->sdb_page_idx << PAGE_SHIFT);
+}
+
+static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
+{
+ rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
+}
+
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+ struct erdma_dev *dev = to_edev(ibctx->device);
+ int ret;
+ struct erdma_uresp_alloc_ctx uresp = {};
+
+ if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ INIT_LIST_HEAD(&ctx->dbrecords_page_list);
+ mutex_init(&ctx->dbrecords_page_mutex);
+
+ alloc_db_resources(dev, ctx);
+
+ ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
+ ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
+
+ if (udata->outlen < sizeof(uresp)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
+ if (!ctx->sq_db_mmap_entry) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
+ if (!ctx->rq_db_mmap_entry) {
+		ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
+ if (!ctx->cq_db_mmap_entry) {
+		ret = -ENOMEM;
+ goto err_out;
+ }
+
+ uresp.dev_id = dev->pdev->device;
+ uresp.sdb_type = ctx->sdb_type;
+ uresp.sdb_offset = ctx->sdb_page_off;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ erdma_uctx_user_mmap_entries_remove(ctx);
+ atomic_dec(&dev->num_ctx);
+ return ret;
+}
+
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+ struct erdma_dev *dev = to_edev(ibctx->device);
+
+ spin_lock(&dev->db_bitmap_lock);
+ if (ctx->sdb_type == ERDMA_SDB_PAGE)
+ clear_bit(ctx->sdb_idx, dev->sdb_page);
+ else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
+ clear_bit(ctx->sdb_idx, dev->sdb_entry);
+
+ erdma_uctx_user_mmap_entries_remove(ctx);
+
+ spin_unlock(&dev->db_bitmap_lock);
+
+ atomic_dec(&dev->num_ctx);
+}
+
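+/* Translate IB verbs QP states onto the erdma hardware state machine. */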
+static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
+ [IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
+ [IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
+ [IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
+ [IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
+ [IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
+ [IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
+};
+
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct erdma_qp_attrs new_attrs;
+ enum erdma_qp_attr_mask erdma_attr_mask = 0;
+ struct erdma_qp *qp = to_eqp(ibqp);
+ int ret = 0;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ memset(&new_attrs, 0, sizeof(new_attrs));
+
+ if (attr_mask & IB_QP_STATE) {
+ new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];
+
+ erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
+ }
+
+ down_write(&qp->state_lock);
+
+ ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);
+
+ up_write(&qp->state_lock);
+
+ return ret;
+}
+
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct erdma_qp *qp;
+ struct erdma_dev *dev;
+
+	if (!ibqp || !qp_attr || !qp_init_attr)
+		return -EINVAL;
+
+	qp = to_eqp(ibqp);
+	dev = to_edev(ibqp->device);
+
+ qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+ qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+
+ qp_attr->cap.max_send_wr = qp->attrs.sq_size;
+ qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
+ qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
+ qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;
+
+ qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
+ qp_attr->max_rd_atomic = qp->attrs.irq_size;
+ qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
+
+ qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ return 0;
+}
+
+static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
+ struct erdma_ureq_create_cq *ureq)
+{
+ int ret;
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mtt, ureq->qbuf_va,
+ ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
+ 1);
+ if (ret)
+ return ret;
+
+ ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
+ &cq->user_cq.user_dbr_page,
+ &cq->user_cq.db_info_dma_addr);
+ if (ret)
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+
+ return ret;
+}
+
+static int erdma_init_kernel_cq(struct erdma_cq *cq)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ cq->kern_cq.qbuf =
+ dma_alloc_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
+ if (!cq->kern_cq.qbuf)
+ return -ENOMEM;
+
+ cq->kern_cq.db_record =
+ (u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT));
+ spin_lock_init(&cq->kern_cq.lock);
+	/* Use the default CQ doorbell address. */
+ cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
+
+ return 0;
+}
+
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ unsigned int depth = attr->cqe;
+ int ret;
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+
+ if (depth > dev->attrs.max_cqe)
+ return -EINVAL;
+
+ depth = roundup_pow_of_two(depth);
+ cq->ibcq.cqe = depth;
+ cq->depth = depth;
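+	/* EQN 0 is the async event queue, so completion vectors start at 1. */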
+ cq->assoc_eqn = attr->comp_vector + 1;
+
+ ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
+ XA_LIMIT(1, dev->attrs.max_cq - 1),
+ &dev->next_alloc_cqn, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ struct erdma_ureq_create_cq ureq;
+ struct erdma_uresp_create_cq uresp;
+
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (ret)
+ goto err_out_xa;
+
+ ret = erdma_init_user_cq(ctx, cq, &ureq);
+ if (ret)
+ goto err_out_xa;
+
+ uresp.cq_id = cq->cqn;
+ uresp.num_cqe = depth;
+
+ ret = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ if (ret)
+ goto err_free_res;
+ } else {
+ ret = erdma_init_kernel_cq(cq);
+ if (ret)
+ goto err_out_xa;
+ }
+
+ ret = create_cq_cmd(dev, cq);
+ if (ret)
+ goto err_free_res;
+
+ return 0;
+
+err_free_res:
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ } else {
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(depth << CQE_SHIFT),
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ }
+
+err_out_xa:
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return ret;
+}
+
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
+{
+ struct ib_event event;
+
+ event.device = &dev->ibdev;
+ event.element.port_num = 1;
+ event.event = reason;
+
+ ib_dispatch_event(&event);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
new file mode 100644
index 000000000000..c7baddb1f292
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_VERBS_H__
+#define __ERDMA_VERBS_H__
+
+#include <linux/errno.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+
+/* RDMA Capability. */
+#define ERDMA_MAX_PD (128 * 1024)
+#define ERDMA_MAX_SEND_WR 4096
+#define ERDMA_MAX_ORD 128
+#define ERDMA_MAX_IRD 128
+#define ERDMA_MAX_SGE_RD 1
+#define ERDMA_MAX_CONTEXT (128 * 1024)
+#define ERDMA_MAX_SEND_SGE 6
+#define ERDMA_MAX_RECV_SGE 1
+#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
+#define ERDMA_MAX_FRMR_PA 512
+
+enum {
+ ERDMA_MMAP_IO_NC = 0, /* no cache */
+};
+
+struct erdma_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 address;
+ u8 mmap_flag;
+};
+
+struct erdma_ucontext {
+ struct ib_ucontext ibucontext;
+
+ u32 sdb_type;
+ u32 sdb_idx;
+ u32 sdb_page_idx;
+ u32 sdb_page_off;
+ u64 sdb;
+ u64 rdb;
+ u64 cdb;
+
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *cq_db_mmap_entry;
+
+ /* doorbell records */
+ struct list_head dbrecords_page_list;
+ struct mutex dbrecords_page_mutex;
+};
+
+struct erdma_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+};
+
+/*
+ * MemoryRegion definition.
+ */
+#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
+#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* each MTT entry takes 8 bytes. */
+#define ERDMA_MR_MAX_MTT_CNT 524288
+#define ERDMA_MTT_ENTRY_SIZE 8
+
+#define ERDMA_MR_TYPE_NORMAL 0
+#define ERDMA_MR_TYPE_FRMR 1
+#define ERDMA_MR_TYPE_DMA 2
+
+#define ERDMA_MR_INLINE_MTT 0
+#define ERDMA_MR_INDIRECT_MTT 1
+
+#define ERDMA_MR_ACC_LR BIT(0)
+#define ERDMA_MR_ACC_LW BIT(1)
+#define ERDMA_MR_ACC_RR BIT(2)
+#define ERDMA_MR_ACC_RW BIT(3)
+
+static inline u8 to_erdma_access_flags(int access)
+{
+ return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
+ (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
+ (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0);
+}
+
+struct erdma_mem {
+ struct ib_umem *umem;
+ void *mtt_buf;
+ u32 mtt_type;
+ u32 page_size;
+ u32 page_offset;
+ u32 page_cnt;
+ u32 mtt_nents;
+
+ u64 va;
+ u64 len;
+
+ u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
+};
+
+struct erdma_mr {
+ struct ib_mr ibmr;
+ struct erdma_mem mem;
+ u8 type;
+ u8 access;
+ u8 valid;
+};
+
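+/*
+ * Doorbell records of one user context share umem pages; refcnt counts
+ * the QPs and CQs whose records live on the page.
+ */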
+struct erdma_user_dbrecords_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ u64 va;
+ int refcnt;
+};
+
+struct erdma_uqp {
+ struct erdma_mem sq_mtt;
+ struct erdma_mem rq_mtt;
+
+ dma_addr_t sq_db_info_dma_addr;
+ dma_addr_t rq_db_info_dma_addr;
+
+ struct erdma_user_dbrecords_page *user_dbr_page;
+
+ u32 rq_offset;
+};
+
+struct erdma_kqp {
+ u16 sq_pi;
+ u16 sq_ci;
+
+ u16 rq_pi;
+ u16 rq_ci;
+
+ u64 *swr_tbl;
+ u64 *rwr_tbl;
+
+ void __iomem *hw_sq_db;
+ void __iomem *hw_rq_db;
+
+ void *sq_buf;
+ dma_addr_t sq_buf_dma_addr;
+
+ void *rq_buf;
+ dma_addr_t rq_buf_dma_addr;
+
+ void *sq_db_info;
+ void *rq_db_info;
+
+ u8 sig_all;
+};
+
+enum erdma_qp_state {
+ ERDMA_QP_STATE_IDLE = 0,
+ ERDMA_QP_STATE_RTR = 1,
+ ERDMA_QP_STATE_RTS = 2,
+ ERDMA_QP_STATE_CLOSING = 3,
+ ERDMA_QP_STATE_TERMINATE = 4,
+ ERDMA_QP_STATE_ERROR = 5,
+ ERDMA_QP_STATE_UNDEF = 7,
+ ERDMA_QP_STATE_COUNT = 8
+};
+
+enum erdma_qp_attr_mask {
+ ERDMA_QP_ATTR_STATE = (1 << 0),
+ ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
+ ERDMA_QP_ATTR_ORD = (1 << 3),
+ ERDMA_QP_ATTR_IRD = (1 << 4),
+ ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
+ ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
+ ERDMA_QP_ATTR_MPA = (1 << 7)
+};
+
+struct erdma_qp_attrs {
+ enum erdma_qp_state state;
+ enum erdma_cc_alg cc; /* Congestion control algorithm */
+ u32 sq_size;
+ u32 rq_size;
+ u32 orq_size;
+ u32 irq_size;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 cookie;
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+ u8 qp_type;
+ u8 pd_len;
+};
+
+struct erdma_qp {
+ struct ib_qp ibqp;
+ struct kref ref;
+ struct completion safe_free;
+ struct erdma_dev *dev;
+ struct erdma_cep *cep;
+ struct rw_semaphore state_lock;
+
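+	/* Exactly one of the two is valid, fixed at QP creation time. */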
+ union {
+ struct erdma_kqp kern_qp;
+ struct erdma_uqp user_qp;
+ };
+
+ struct erdma_cq *scq;
+ struct erdma_cq *rcq;
+
+ struct erdma_qp_attrs attrs;
+ spinlock_t lock;
+};
+
+struct erdma_kcq_info {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+ u32 ci;
+ u32 cmdsn;
+ u32 notify_cnt;
+
+ spinlock_t lock;
+ u8 __iomem *db;
+ u64 *db_record;
+};
+
+struct erdma_ucq_info {
+ struct erdma_mem qbuf_mtt;
+ struct erdma_user_dbrecords_page *user_dbr_page;
+ dma_addr_t db_info_dma_addr;
+};
+
+struct erdma_cq {
+ struct ib_cq ibcq;
+ u32 cqn;
+
+ u32 depth;
+ u32 assoc_eqn;
+
+ union {
+ struct erdma_kcq_info kern_cq;
+ struct erdma_ucq_info user_cq;
+ };
+};
+
+#define QP_ID(qp) ((qp)->ibqp.qp_num)
+
+static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
+}
+
+static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
+}
+
+void erdma_qp_get(struct erdma_qp *qp);
+void erdma_qp_put(struct erdma_qp *qp);
+int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask);
+void erdma_qp_llp_close(struct erdma_qp *qp);
+void erdma_qp_cm_drop(struct erdma_qp *qp);
+
+static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct erdma_ucontext, ibucontext);
+}
+
+static inline struct erdma_pd *to_epd(struct ib_pd *pd)
+{
+ return container_of(pd, struct erdma_pd, ibpd);
+}
+
+static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct erdma_mr, ibmr);
+}
+
+static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
+{
+ return container_of(qp, struct erdma_qp, ibqp);
+}
+
+static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct erdma_cq, ibcq);
+}
+
+static inline struct erdma_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *ibmmap)
+{
+ return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
+}
+
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
+int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
+ struct ib_udata *data);
+int erdma_get_port_immutable(struct ib_device *dev, u32 port,
+ struct ib_port_immutable *ib_port_immutable);
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *data);
+int erdma_query_port(struct ib_device *dev, u32 port,
+ struct ib_port_attr *attr);
+int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
+ union ib_gid *gid);
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *data);
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *data);
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_udata *udata);
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+void erdma_qp_get_ref(struct ib_qp *ibqp);
+void erdma_qp_put_ref(struct ib_qp *ibqp);
+struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+
+#endif
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index 6eb739052121..14b92e12bf29 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HFI1
tristate "Cornelis OPX Gen1 support"
- depends on X86_64 && INFINIBAND_RDMAVT && I2C
+ depends on X86_64 && INFINIBAND_RDMAVT && I2C && !UML
select MMU_NOTIFIER
select CRC32
select I2C_ALGOBIT
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 2e4cf2b11653..629beff053ad 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1179,8 +1179,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
goto done;
ret = init_user_ctxt(fd, uctxt);
- if (ret)
+ if (ret) {
+ hfi1_free_ctxt_rcv_groups(uctxt);
goto done;
+ }
user_init(uctxt);
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index d6bbdb8fcb50..5d9a7b09ca37 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -742,9 +742,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
kzalloc_node(sizeof(*tx->sdma_hdr),
GFP_KERNEL, priv->dd->node);
- netif_tx_napi_add(dev, &txq->napi,
- hfi1_ipoib_poll_tx_ring,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
}
return 0;
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 03b098a494b5..3dfa5aff2512 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -216,7 +216,7 @@ static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
* right now.
*/
set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
- netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
+ netif_napi_add_weight(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
rc = msix_netdev_request_rcd_irq(rxq->rcd);
if (rc)
goto bail_context_irq_failure;
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 136f9a99e1e0..7690f996d5e3 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -172,7 +172,7 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
}
/*
- * Read nbytes from "from" and and place them in the low bytes
+ * Read nbytes from "from" and place them in the low bytes
* of pbuf->carry. Other bytes are left as-is. Any previous
* value in pbuf->carry is lost.
*
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index 707f1053f0b7..582b6f68df3d 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -26,14 +26,10 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
TP_PROTO(const char *function, struct va_format *vaf),
TP_ARGS(function, vaf),
TP_STRUCT__entry(__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf
- (__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >=
- MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("(%s) %s",
__get_str(function),
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 2855e9ad4b32..f848eedc6a23 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -959,6 +959,7 @@ struct hns_roce_dev {
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
+ struct work_struct ecc_work;
const struct hns_roce_dfx_hw *dfx;
u32 func_num;
u32 is_vf;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ba3c742258ef..cbdafaac678a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -55,6 +55,42 @@ enum {
CMD_RST_PRC_EBUSY,
};
+enum ecc_resource_type {
+ ECC_RESOURCE_QPC,
+ ECC_RESOURCE_CQC,
+ ECC_RESOURCE_MPT,
+ ECC_RESOURCE_SRQC,
+ ECC_RESOURCE_GMV,
+ ECC_RESOURCE_QPC_TIMER,
+ ECC_RESOURCE_CQC_TIMER,
+ ECC_RESOURCE_SCCC,
+ ECC_RESOURCE_COUNT,
+};
+
+static const struct {
+ const char *name;
+ u8 read_bt0_op;
+ u8 write_bt0_op;
+} fmea_ram_res[] = {
+ { "ECC_RESOURCE_QPC",
+ HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
+ { "ECC_RESOURCE_CQC",
+ HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
+ { "ECC_RESOURCE_MPT",
+ HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
+ { "ECC_RESOURCE_SRQC",
+ HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
+ /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
+ { "ECC_RESOURCE_GMV",
+ 0, 0 },
+ { "ECC_RESOURCE_QPC_TIMER",
+ HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
+ { "ECC_RESOURCE_CQC_TIMER",
+ HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
+ { "ECC_RESOURCE_SCCC",
+ HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
+};
+
static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg)
{
@@ -5855,12 +5891,12 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
-static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
{
struct device *dev = hr_dev->dev;
struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
- int aeqe_found = 0;
+ irqreturn_t aeqe_found = IRQ_NONE;
int event_type;
u32 queue_num;
int sub_type;
@@ -5914,7 +5950,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->event_type = event_type;
eq->sub_type = sub_type;
++eq->cons_index;
- aeqe_found = 1;
+ aeqe_found = IRQ_HANDLED;
hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
@@ -5922,7 +5958,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
}
update_eq_db(eq);
- return aeqe_found;
+
+ return IRQ_RETVAL(aeqe_found);
}
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
@@ -5937,11 +5974,11 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
-static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
- int ceqe_found = 0;
+ irqreturn_t ceqe_found = IRQ_NONE;
u32 cqn;
while (ceqe) {
@@ -5955,21 +5992,21 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
hns_roce_cq_completion(hr_dev, cqn);
++eq->cons_index;
- ceqe_found = 1;
+ ceqe_found = IRQ_HANDLED;
ceqe = next_ceqe_sw_v2(eq);
}
update_eq_db(eq);
- return ceqe_found;
+ return IRQ_RETVAL(ceqe_found);
}
static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
struct hns_roce_eq *eq = eq_ptr;
struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work;
+ irqreturn_t int_work;
if (eq->type_flag == HNS_ROCE_CEQ)
/* Completion event interrupt */
@@ -5981,27 +6018,22 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
return IRQ_RETVAL(int_work);
}
-static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
+ u32 int_st)
{
- struct hns_roce_dev *hr_dev = dev_id;
- struct device *dev = hr_dev->dev;
- int int_work = 0;
- u32 int_st;
+ struct pci_dev *pdev = hr_dev->pci_dev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops = ae_dev->ops;
+ irqreturn_t int_work = IRQ_NONE;
u32 int_en;
- /* Abnormal interrupt */
- int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
- struct pci_dev *pdev = hr_dev->pci_dev;
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- const struct hnae3_ae_ops *ops = ae_dev->ops;
+ dev_err(hr_dev->dev, "AEQ overflow!\n");
- dev_err(dev, "AEQ overflow!\n");
-
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
+ 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
/* Set reset level for reset_event() */
if (ops->set_default_reset_request)
@@ -6013,19 +6045,165 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
- int_work = 1;
- } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
- dev_err(dev, "RAS interrupt!\n");
+ int_work = IRQ_HANDLED;
+ } else {
+ dev_err(hr_dev->dev, "there is no basic abn irq found.\n");
+ }
+
+ return IRQ_RETVAL(int_work);
+}
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ int ret;
- int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
+ ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
+ ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);
+
+ return 0;
+}
+
+static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 addr_upper;
+ u32 addr_low;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read gmv, ret = %d.\n", ret);
+ return ret;
+ }
+
+ addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
+ addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
+ hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
+ hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
+{
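+	/* Timer and SCCC BT0 hold byte addresses; the rest hold page numbers. */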
+ if (res_type == ECC_RESOURCE_QPC_TIMER ||
+ res_type == ECC_RESOURCE_CQC_TIMER ||
+ res_type == ECC_RESOURCE_SCCC)
+ return le64_to_cpu(*data);
+
+ return le64_to_cpu(*data) << PAGE_SHIFT;
+}
- int_work = 1;
+static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
+ u32 index)
+{
+ u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
+ u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u64 addr;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read fmea ram, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ addr = fmea_get_ram_res_addr(res_type, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to write fmea ram, ret = %d.\n",
+ ret);
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ u32 res_type = ecc_info->res_type;
+ u32 index = ecc_info->index;
+ int ret;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);
+
+ if (res_type >= ECC_RESOURCE_COUNT) {
+ dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
+ res_type);
+ return;
+ }
+
+ if (res_type == ECC_RESOURCE_GMV)
+ ret = fmea_recover_gmv(hr_dev, index);
+ else
+ ret = fmea_recover_others(hr_dev, res_type, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to recover %s, index = %u, ret = %d.\n",
+ fmea_ram_res[res_type].name, index, ret);
+}
+
+static void fmea_ram_ecc_work(struct work_struct *ecc_work)
+{
+ struct hns_roce_dev *hr_dev =
+ container_of(ecc_work, struct hns_roce_dev, ecc_work);
+ struct fmea_ram_ecc ecc_info = {};
+
+ if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
+ dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
+ return;
+ }
+
+ if (!ecc_info.is_ecc_err) {
+ dev_err(hr_dev->dev, "there is no fmea ram ecc err found.\n");
+ return;
+ }
+
+ fmea_ram_ecc_recover(hr_dev, &ecc_info);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ irqreturn_t int_work = IRQ_NONE;
+ u32 int_st;
+
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+
+ if (int_st) {
+ int_work = abnormal_interrupt_basic(hr_dev, int_st);
+ } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
+ int_work = IRQ_HANDLED;
} else {
- dev_err(dev, "There is no abnormal irq found!\n");
+ dev_err(hr_dev->dev, "there is no abnormal irq found.\n");
}
return IRQ_RETVAL(int_work);
@@ -6342,6 +6520,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
}
}
+ INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);
+
hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
if (!hr_dev->irq_workq) {
dev_err(dev, "failed to create irq workqueue.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 7ffb7824d268..f96debac30fe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -250,6 +250,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
HNS_ROCE_OPC_EXT_CFG = 0x8512,
+ HNS_ROCE_QUERY_RAM_ECC = 0x8513,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -1107,6 +1108,11 @@ enum {
#define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32)
#define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64)
+/* Fields of HNS_ROCE_QUERY_RAM_ECC */
+#define QUERY_RAM_ECC_1BIT_ERR CMQ_REQ_FIELD_LOC(31, 0)
+#define QUERY_RAM_ECC_RES_TYPE CMQ_REQ_FIELD_LOC(63, 32)
+#define QUERY_RAM_ECC_TAG CMQ_REQ_FIELD_LOC(95, 64)
+
struct hns_roce_cfg_sgid_tb {
__le32 table_idx_rsv;
__le32 vf_sgid_l;
@@ -1343,6 +1349,12 @@ struct hns_roce_dip {
struct list_head node; /* all dips are on a list */
};
+struct fmea_ram_ecc {
+ u32 is_ecc_err;
+ u32 res_type;
+ u32 index;
+};
+
/* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
#define HNS_ROCE_MAX_CQ_PERIOD 65
@@ -1382,7 +1394,6 @@ struct hns_roce_dip {
#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
-#define HNS_ROCE_V2_VF_INT_ST_RAS_INT_S 1
#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 646fa8677490..7b086fe63a24 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1477,12 +1477,13 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
list_for_each_entry (listen_node, &cm_core->listen_list, list) {
memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
listen_port = listen_node->loc_port;
+ if (listen_port != dst_port ||
+ !(listener_state & listen_node->listener_state))
+ continue;
/* compare node pair, return node handle if a match */
- if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
- !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
- listen_port == dst_port &&
- vlan_id == listen_node->vlan_id &&
- (listener_state & listen_node->listener_state)) {
+ if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) ||
+ (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) &&
+ vlan_id == listen_node->vlan_id)) {
refcount_inc(&listen_node->refcnt);
spin_unlock_irqrestore(&cm_core->listen_list_lock,
flags);
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 58c0e181ca2b..a41e0d21143a 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -4872,10 +4872,12 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
sd_diff = sd_needed - hmc_fpm_misc->max_sds;
if (sd_diff > 128) {
- if (qpwanted > 128 && sd_diff > 144)
+ if (!(loop_count % 2) && qpwanted > 128) {
qpwanted /= 2;
- mrwanted /= 2;
- pblewanted /= 2;
+ } else {
+ mrwanted /= 2;
+ pblewanted /= 2;
+ }
continue;
}
if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index dd3943d22dc6..4f132c6fb653 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -257,10 +257,6 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
iwqp->last_aeq = info->ae_id;
spin_unlock_irqrestore(&iwqp->lock, flags);
ctx_info = &iwqp->ctx_info;
- if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1))
- ctx_info->roce_info->err_rq_idx_valid = true;
- else
- ctx_info->iwarp_info->err_rq_idx_valid = true;
} else {
if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
continue;
@@ -370,16 +366,12 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- if (rdma_protocol_roce(&iwdev->ibdev, 1))
- ctx_info->roce_info->err_rq_idx_valid = false;
- else
- ctx_info->iwarp_info->err_rq_idx_valid = false;
- fallthrough;
default:
- ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n",
- info->ae_id, info->qp, info->qp_cq_id);
+ ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) {
+ ctx_info->roce_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
@@ -388,7 +380,8 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
irdma_cm_disconn(iwqp);
break;
}
- if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) {
+ ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
ctx_info->tcp_info_valid = false;
ctx_info->iwarp_info_valid = true;
@@ -1512,10 +1505,7 @@ static int irdma_hmc_setup(struct irdma_pci_f *rf)
int status;
u32 qpcnt;
- if (rf->rdma_ver == IRDMA_GEN_1)
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
- else
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
rf->sd_type = IRDMA_SD_TYPE_DIRECT;
status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
@@ -1543,7 +1533,7 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
rf->obj_mem.pa);
rf->obj_mem.va = NULL;
if (rf->rdma_ver != IRDMA_GEN_1) {
- kfree(rf->allocated_ws_nodes);
+ bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
}
kfree(rf->ceqlist);
@@ -1972,9 +1962,8 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
u32 ret;
if (rf->rdma_ver != IRDMA_GEN_1) {
- rf->allocated_ws_nodes =
- kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
- sizeof(unsigned long), GFP_KERNEL);
+ rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
+ GFP_KERNEL);
if (!rf->allocated_ws_nodes)
return -ENOMEM;
@@ -2023,7 +2012,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
mem_rsrc_kzalloc_fail:
- kfree(rf->allocated_ws_nodes);
+ bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
return ret;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index ef862bced20f..65e966ad3453 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -85,7 +85,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_NO_QSET 0xffff
#define IW_CFG_FPM_QP_COUNT 32768
-#define IRDMA_MAX_PAGES_PER_FMR 512
+#define IRDMA_MAX_PAGES_PER_FMR 262144
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index ab3c5208a123..fdf4cc88cb91 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -652,6 +652,7 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
+ {0xffff, 0x8002, "Invalid State"},
{0xffff, 0x8006, "Flush No Wqe Pending"},
{0xffff, 0x8007, "Modify QP Bad Close"},
{0xffff, 0x8009, "LLP Closed"},
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 96135a228f26..9b07b8af2997 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1776,11 +1776,11 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
spin_unlock_irqrestore(&iwcq->lock, flags);
irdma_cq_wq_destroy(iwdev->rf, cq);
- irdma_cq_free_rsrc(iwdev->rf, iwcq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
irdma_sc_cleanup_ceqes(cq, ceq);
spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+ irdma_cq_free_rsrc(iwdev->rf, iwcq);
return 0;
}
@@ -2605,7 +2605,7 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
- true);
+ false);
if (err_code)
goto err_get_pble;
@@ -2641,8 +2641,16 @@ static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(iwmr->npages == iwmr->page_cnt))
return -ENOMEM;
- pbl = palloc->level1.addr;
- pbl[iwmr->npages++] = addr;
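+	/* A level-2 allocation scatters PBLEs across 512-entry leaf pages. */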
+ if (palloc->level == PBLE_LEVEL_2) {
+ struct irdma_pble_info *palloc_info =
+ palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
+
+ palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
+ } else {
+ pbl = palloc->level1.addr;
+ pbl[iwmr->npages] = addr;
+ }
+ iwmr->npages++;
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 08371a80fdc2..be189e0525de 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -523,6 +523,10 @@ repoll:
"Requestor" : "Responder", cq->mcq.cqn);
mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
err_cqe->syndrome, err_cqe->vendor_err_synd);
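+	/* Any real (non-flush) error on the UMR QP triggers UMR recovery. */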
+ if (wc->status != IB_WC_WR_FLUSH_ERR &&
+ (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
+ dev->umrc.state = MLX5_UMR_STATE_RECOVER;
+
if (opcode == MLX5_CQE_REQ_ERR) {
wq = &(*cur_qp)->sq;
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 39ffb363ba0c..490ec308e309 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -679,7 +679,15 @@ enum flow_table_type {
#define MLX5_FS_MAX_TYPES 6
#define MLX5_FS_MAX_ENTRIES BIT(16)
-static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
+static bool mlx5_ib_shared_ft_allowed(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
+}
+
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_namespace *ns,
struct mlx5_ib_flow_prio *prio,
int priority,
int num_entries, int num_groups,
@@ -688,6 +696,8 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
+ if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
+ ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
ft_attr.prio = priority;
ft_attr.max_fte = num_entries;
ft_attr.flags = flags;
@@ -784,8 +794,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, max_table_size, num_groups,
- flags);
+ return _get_prio(dev, ns, prio, priority, max_table_size,
+ num_groups, flags);
return prio;
}
@@ -927,7 +937,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
prio = &dev->flow_db->opfcs[type];
if (!prio->flow_table) {
- prio = _get_prio(ns, prio, priority,
+ prio = _get_prio(dev, ns, prio, priority,
dev->num_ports * MAX_OPFC_RULES, 1, 0);
if (IS_ERR(prio)) {
err = PTR_ERR(prio);
@@ -1407,8 +1417,8 @@ free_ucmd:
}
static struct mlx5_ib_flow_prio *
-_get_flow_table(struct mlx5_ib_dev *dev,
- struct mlx5_ib_flow_matcher *fs_matcher,
+_get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
+ enum mlx5_flow_namespace_type ns_type,
bool mcast)
{
struct mlx5_flow_namespace *ns = NULL;
@@ -1421,11 +1431,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (mcast)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+ priority = ib_prio_to_core_prio(user_priority, false);
esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- switch (fs_matcher->ns_type) {
+ switch (ns_type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
@@ -1452,17 +1462,17 @@ _get_flow_table(struct mlx5_ib_dev *dev,
reformat_l3_tunnel_to_l2) &&
esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
default:
break;
@@ -1470,11 +1480,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
- ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
+ ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
if (!ns)
return ERR_PTR(-EOPNOTSUPP);
- switch (fs_matcher->ns_type) {
+ switch (ns_type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
prio = &dev->flow_db->prios[priority];
break;
@@ -1499,7 +1509,7 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (prio->flow_table)
return prio;
- return _get_prio(ns, prio, priority, max_table_size,
+ return _get_prio(dev, ns, prio, priority, max_table_size,
MLX5_FS_MAX_TYPES, flags);
}
@@ -1618,7 +1628,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
mutex_lock(&dev->flow_db->lock);
- ft_prio = _get_flow_table(dev, fs_matcher, mcast);
+ ft_prio = _get_flow_table(dev, fs_matcher->priority,
+ fs_matcher->ns_type, mcast);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
@@ -2015,6 +2026,23 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
return 0;
}
+static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_steering_anchor *obj = uobject->object;
+
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
+
+ mutex_lock(&obj->dev->flow_db->lock);
+ put_flow_table(obj->dev, obj->ft_prio, true);
+ mutex_unlock(&obj->dev->flow_db->lock);
+
+ kfree(obj);
+ return 0;
+}
+
static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
struct mlx5_ib_flow_matcher *obj)
{
@@ -2050,12 +2078,10 @@ static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
if (err)
return err;
- if (flags) {
- mlx5_ib_ft_type_to_namespace(
+ if (flags)
+ return mlx5_ib_ft_type_to_namespace(
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
&obj->ns_type);
- return 0;
- }
}
obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
@@ -2121,6 +2147,75 @@ end:
return err;
}
+static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_table_type ib_uapi_ft_type;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_ib_steering_anchor *obj;
+ struct mlx5_ib_flow_prio *ft_prio;
+ u16 priority;
+ u32 ft_id;
+ int err;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ err = uverbs_get_const(&ib_uapi_ft_type, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ib_uapi_ft_type, &ns_type);
+ if (err)
+ return err;
+
+ err = uverbs_copy_from(&priority, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY);
+ if (err)
+ return err;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ mutex_lock(&dev->flow_db->lock);
+ ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+ if (IS_ERR(ft_prio)) {
+ mutex_unlock(&dev->flow_db->lock);
+ err = PTR_ERR(ft_prio);
+ goto free_obj;
+ }
+
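+	/* Pin the priority's flow table and hand its id back to user space. */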
+ ft_prio->refcount++;
+ ft_id = mlx5_flow_table_id(ft_prio->flow_table);
+ mutex_unlock(&dev->flow_db->lock);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ &ft_id, sizeof(ft_id));
+ if (err)
+ goto put_flow_table;
+
+ uobj->object = obj;
+ obj->dev = dev;
+ obj->ft_prio = ft_prio;
+ atomic_set(&obj->usecnt, 0);
+
+ return 0;
+
+put_flow_table:
+ mutex_lock(&dev->flow_db->lock);
+ put_flow_table(dev, ft_prio, true);
+ mutex_unlock(&dev->flow_db->lock);
+free_obj:
+ kfree(obj);
+
+ return err;
+}
+
static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
enum mlx5_ib_uapi_flow_table_type ft_type,
@@ -2477,6 +2572,35 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_TYPE_ALLOC_IDR(steering_anchor_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
+
const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_FLOW_MATCHER),
@@ -2485,6 +2609,9 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
&mlx5_ib_fs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_actions),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
{},
};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b68fddeac0f1..fc94a1b25485 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
- dev->port_caps[port - 1].has_smi = false;
- if (MLX5_CAP_GEN(dev->mdev, port_type) ==
- MLX5_CAP_PORT_TYPE_IB) {
- if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port, 0,
- &vport_ctx);
- if (err) {
- mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
- port, err);
- return err;
- }
- dev->port_caps[port - 1].has_smi =
- vport_ctx.has_smi;
- } else {
- dev->port_caps[port - 1].has_smi = true;
- }
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
}
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
}
+
return 0;
}
@@ -4002,7 +4000,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
int err;
- err = mlx5_mr_cache_cleanup(dev);
+ err = mlx5_mkey_cache_cleanup(dev);
if (err)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
@@ -4022,7 +4020,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
if (ret)
return ret;
- ret = mlx5_mr_cache_init(dev);
+ ret = mlx5_mkey_cache_init(dev);
if (ret) {
mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
mlx5r_umr_resource_cleanup(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 998b67509a53..2e2ad3918385 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -259,6 +259,12 @@ struct mlx5_ib_flow_matcher {
u8 match_criteria_enable;
};
+struct mlx5_ib_steering_anchor {
+ struct mlx5_ib_flow_prio *ft_prio;
+ struct mlx5_ib_dev *dev;
+ atomic_t usecnt;
+};
+
struct mlx5_ib_pp {
u16 index;
struct mlx5_core_dev *mdev;
@@ -613,6 +619,7 @@ struct mlx5_ib_mkey {
unsigned int ndescs;
struct wait_queue_head wait;
refcount_t usecount;
+ struct mlx5_cache_ent *cache_ent;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -635,20 +642,9 @@ struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_ib_mkey mmkey;
- /* User MR data */
- struct mlx5_cache_ent *cache_ent;
- /* Everything after cache_ent is zero'd when MR allocated */
struct ib_umem *umem;
union {
- /* Used only while the MR is in the cache */
- struct {
- u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
- struct mlx5_async_work cb_work;
- /* Cache list element */
- struct list_head list;
- };
-
/* Used only by kernel MRs (umem == NULL) */
struct {
void *descs;
@@ -688,12 +684,6 @@ struct mlx5_ib_mr {
};
};
-/* Zero the fields in the mr that are variant depending on usage */
-static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
-{
- memset_after(mr, 0, cache_ent);
-}
-
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
@@ -717,21 +707,29 @@ struct mlx5_ib_umr_context {
struct completion done;
};
+enum {
+ MLX5_UMR_STATE_ACTIVE,
+ MLX5_UMR_STATE_RECOVER,
+ MLX5_UMR_STATE_ERR,
+};
+
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- /* control access to UMR QP
+ /* Protects from UMR QP overflow
*/
struct semaphore sem;
+ /* Protects from using UMR while the UMR is not active
+ */
+ struct mutex lock;
+ unsigned int state;
};
struct mlx5_cache_ent {
- struct list_head head;
- /* sync access to the cahce entry
- */
- spinlock_t lock;
-
+ struct xarray mkeys;
+ unsigned long stored;
+ unsigned long reserved;
char name[4];
u32 order;
@@ -743,18 +741,11 @@ struct mlx5_cache_ent {
u8 fill_to_high_water:1;
/*
- * - available_mrs is the length of list head, ie the number of MRs
- * available for immediate allocation.
- * - total_mrs is available_mrs plus all in use MRs that could be
- * returned to the cache.
- * - limit is the low water mark for available_mrs, 2* limit is the
+ * - limit is the low water mark for stored mkeys, 2* limit is the
* upper water mark.
- * - pending is the number of MRs currently being created
*/
- u32 total_mrs;
- u32 available_mrs;
+ u32 in_use;
u32 limit;
- u32 pending;
/* Statistics */
u32 miss;
@@ -763,9 +754,19 @@ struct mlx5_cache_ent {
struct delayed_work dwork;
};
-struct mlx5_mr_cache {
+struct mlx5r_async_create_mkey {
+ union {
+ u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+ };
+ struct mlx5_async_work cb_work;
+ struct mlx5_cache_ent *ent;
+ u32 mkey;
+};
+
+struct mlx5_mkey_cache {
struct workqueue_struct *wq;
- struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+ struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
struct dentry *root;
unsigned long last_add;
};
@@ -1064,7 +1065,7 @@ struct mlx5_ib_dev {
struct mlx5_ib_resources devr;
atomic_t mkey_var;
- struct mlx5_mr_cache cache;
+ struct mlx5_mkey_cache cache;
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
@@ -1309,8 +1310,8 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
struct mlx5_cache_ent *ent,
@@ -1338,7 +1339,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags);
@@ -1357,7 +1358,7 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags) {}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index aedfd7ff4846..129d531bd01b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -82,15 +82,14 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
-static void assign_mkey_variant(struct mlx5_ib_dev *dev,
- struct mlx5_ib_mkey *mkey, u32 *in)
+static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
{
u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, mkey_7_0, key);
- mkey->key = key;
+ *mkey = key;
}
static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
@@ -98,7 +97,7 @@ static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
{
int ret;
- assign_mkey_variant(dev, mkey, in);
+ assign_mkey_variant(dev, &mkey->key, in);
ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
if (!ret)
init_waitqueue_head(&mkey->wait);
@@ -106,20 +105,21 @@ static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
return ret;
}
-static int
-mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
- struct mlx5_ib_mkey *mkey,
- struct mlx5_async_ctx *async_ctx,
- u32 *in, int inlen, u32 *out, int outlen,
- struct mlx5_async_work *context)
+static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
{
- MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
- assign_mkey_variant(dev, mkey, in);
- return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
- create_mkey_callback, context);
+ struct mlx5_ib_dev *dev = async_create->ent->dev;
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);
+
+ MLX5_SET(create_mkey_in, async_create->in, opcode,
+ MLX5_CMD_OP_CREATE_MKEY);
+ assign_mkey_variant(dev, &async_create->mkey, async_create->in);
+ return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
+ async_create->out, outlen, create_mkey_callback,
+ &async_create->cb_work);
}
-static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -142,40 +142,132 @@ static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}
+
+static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
+ void *to_store)
+{
+ XA_STATE(xas, &ent->mkeys, 0);
+ void *curr;
+
+ xa_lock_irq(&ent->mkeys);
+ if (limit_pendings &&
+ (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) {
+ xa_unlock_irq(&ent->mkeys);
+ return -EAGAIN;
+ }
+ while (1) {
+ /*
+ * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
+ * doesn't transparently unlock. Instead we set the xas index to
+ * the current value of reserved every iteration.
+ */
+ xas_set(&xas, ent->reserved);
+ curr = xas_load(&xas);
+ if (!curr) {
+ if (to_store && ent->stored == ent->reserved)
+ xas_store(&xas, to_store);
+ else
+ xas_store(&xas, XA_ZERO_ENTRY);
+ if (xas_valid(&xas)) {
+ ent->reserved++;
+ if (to_store) {
+ if (ent->stored != ent->reserved)
+ __xa_store(&ent->mkeys,
+ ent->stored,
+ to_store,
+ GFP_KERNEL);
+ ent->stored++;
+ queue_adjust_cache_locked(ent);
+ WRITE_ONCE(ent->dev->cache.last_add,
+ jiffies);
+ }
+ }
+ }
+ xa_unlock_irq(&ent->mkeys);
+
+ /*
+ * Notice xas_nomem() must always be called as it cleans
+ * up any cached allocation.
+ */
+ if (!xas_nomem(&xas, GFP_KERNEL))
+ break;
+ xa_lock_irq(&ent->mkeys);
+ }
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ if (WARN_ON(curr))
+ return -EINVAL;
+ return 0;
+}
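
[Editor's note] The stored/reserved pair above replaces the old free list plus `pending` counter: slots below `stored` hold real mkeys as xarray value entries, while slots between `stored` and `reserved` hold XA_ZERO_ENTRY placeholders for creations still in flight. A kernel-flavored sketch of that two-counter FIFO in isolation (hypothetical struct and helper names, error handling trimmed), just to make the invariant explicit; pop_stored_mkey() below is the mirror image:

	/* Illustrative sketch only -- not part of this patch. */
	#include <linux/xarray.h>

	struct mkey_fifo {
		struct xarray xa;	/* xa_init_flags(&xa, XA_FLAGS_LOCK_IRQ) */
		unsigned long stored;	/* slots [0, stored) hold real mkeys */
		unsigned long reserved;	/* [stored, reserved) are placeholders */
	};

	/* Claim a slot for an mkey still being created; caller holds xa_lock. */
	static int fifo_reserve(struct mkey_fifo *f)
	{
		void *old = __xa_store(&f->xa, f->reserved, XA_ZERO_ENTRY,
				       GFP_ATOMIC);

		if (xa_is_err(old))
			return xa_err(old);
		f->reserved++;
		return 0;
	}

	/* Fill the oldest placeholder once the HW has returned an mkey. */
	static void fifo_fill(struct mkey_fifo *f, u32 mkey)
	{
		__xa_store(&f->xa, f->stored++, xa_mk_value(mkey), 0);
	}

Storing XA_ZERO_ENTRY is the same trick xa_reserve() uses: the slot exists (so a later store cannot fail on allocation) but reads back as NULL to lookups.
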
+
+static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old;
+
+ ent->reserved--;
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+}
+
+static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
+{
+ void *old;
+
+ old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
+ WARN_ON(old);
+ ent->stored++;
+}
+
+static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old, *xa_mkey;
+
+ ent->stored--;
+ ent->reserved--;
+
+ if (ent->stored == ent->reserved) {
+ xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
+ WARN_ON(!xa_mkey);
+ return (u32)xa_to_value(xa_mkey);
+ }
+
+ xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+ return (u32)xa_to_value(xa_mkey);
+}
+
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
- struct mlx5_ib_mr *mr =
- container_of(context, struct mlx5_ib_mr, cb_work);
- struct mlx5_cache_ent *ent = mr->cache_ent;
+ struct mlx5r_async_create_mkey *mkey_out =
+ container_of(context, struct mlx5r_async_create_mkey, cb_work);
+ struct mlx5_cache_ent *ent = mkey_out->ent;
struct mlx5_ib_dev *dev = ent->dev;
unsigned long flags;
if (status) {
- create_mkey_warn(dev, status, mr->out);
- kfree(mr);
- spin_lock_irqsave(&ent->lock, flags);
- ent->pending--;
+ create_mkey_warn(dev, status, mkey_out->out);
+ kfree(mkey_out);
+ xa_lock_irqsave(&ent->mkeys, flags);
+ undo_push_reserve_mkey(ent);
WRITE_ONCE(dev->fill_delay, 1);
- spin_unlock_irqrestore(&ent->lock, flags);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
mod_timer(&dev->delay_timer, jiffies + HZ);
return;
}
- mr->mmkey.type = MLX5_MKEY_MR;
- mr->mmkey.key |= mlx5_idx_to_mkey(
- MLX5_GET(create_mkey_out, mr->out, mkey_index));
- init_waitqueue_head(&mr->mmkey.wait);
-
+ mkey_out->mkey |= mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
WRITE_ONCE(dev->cache.last_add, jiffies);
- spin_lock_irqsave(&ent->lock, flags);
- list_add_tail(&mr->list, &ent->head);
- ent->available_mrs++;
- ent->total_mrs++;
+ xa_lock_irqsave(&ent->mkeys, flags);
+ push_to_reserved(ent, mkey_out->mkey);
/* If we are doing fill_to_high_water then keep going. */
queue_adjust_cache_locked(ent);
- ent->pending--;
- spin_unlock_irqrestore(&ent->lock, flags);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
+ kfree(mkey_out);
}
static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
@@ -197,15 +289,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
return ret;
}
-static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
+static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
- struct mlx5_ib_mr *mr;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return NULL;
- mr->cache_ent = ent;
-
set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
@@ -215,133 +300,106 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, translations_octword_size,
get_mkc_octo_size(ent->access_mode, ent->ndescs));
MLX5_SET(mkc, mkc, log_page_size, ent->page);
- return mr;
}
/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
- size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mr *mr;
+ struct mlx5r_async_create_mkey *async_create;
void *mkc;
- u32 *in;
int err = 0;
int i;
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
for (i = 0; i < num; i++) {
- mr = alloc_cache_mr(ent, mkc);
- if (!mr) {
- err = -ENOMEM;
- break;
- }
- spin_lock_irq(&ent->lock);
- if (ent->pending >= MAX_PENDING_REG_MR) {
- err = -EAGAIN;
- spin_unlock_irq(&ent->lock);
- kfree(mr);
- break;
- }
- ent->pending++;
- spin_unlock_irq(&ent->lock);
- err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
- &ent->dev->async_ctx, in, inlen,
- mr->out, sizeof(mr->out),
- &mr->cb_work);
+ async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
+ GFP_KERNEL);
+ if (!async_create)
+ return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
+ memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
+ async_create->ent = ent;
+
+ err = push_mkey(ent, true, NULL);
+ if (err)
+ goto free_async_create;
+
+ err = mlx5_ib_create_mkey_cb(async_create);
if (err) {
- spin_lock_irq(&ent->lock);
- ent->pending--;
- spin_unlock_irq(&ent->lock);
mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
- kfree(mr);
- break;
+ goto err_undo_reserve;
}
}
- kfree(in);
+ return 0;
+
+err_undo_reserve:
+ xa_lock_irq(&ent->mkeys);
+ undo_push_reserve_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+free_async_create:
+ kfree(async_create);
return err;
}
/* Synchronously create a MR in the cache */
-static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
+static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{
size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
int err;
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
- mr = alloc_cache_mr(ent, mkc);
- if (!mr) {
- err = -ENOMEM;
- goto free_in;
- }
-
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
+ err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
if (err)
- goto free_mr;
+ goto free_in;
- init_waitqueue_head(&mr->mmkey.wait);
- mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
- spin_lock_irq(&ent->lock);
- ent->total_mrs++;
- spin_unlock_irq(&ent->lock);
- kfree(in);
- return mr;
-free_mr:
- kfree(mr);
free_in:
kfree(in);
- return ERR_PTR(err);
+ return err;
}
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
- struct mlx5_ib_mr *mr;
+ u32 mkey;
- lockdep_assert_held(&ent->lock);
- if (list_empty(&ent->head))
+ lockdep_assert_held(&ent->mkeys.xa_lock);
+ if (!ent->stored)
return;
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
- kfree(mr);
- spin_lock_irq(&ent->lock);
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
}
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
bool limit_fill)
+ __acquires(&ent->mkeys) __releases(&ent->mkeys)
{
int err;
- lockdep_assert_held(&ent->lock);
+ lockdep_assert_held(&ent->mkeys.xa_lock);
while (true) {
if (limit_fill)
target = ent->limit * 2;
- if (target == ent->available_mrs + ent->pending)
+ if (target == ent->reserved)
return 0;
- if (target > ent->available_mrs + ent->pending) {
- u32 todo = target - (ent->available_mrs + ent->pending);
+ if (target > ent->reserved) {
+ u32 todo = target - ent->reserved;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
err = add_keys(ent, todo);
if (err == -EAGAIN)
usleep_range(3000, 5000);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (err) {
if (err != -EAGAIN)
return err;
@@ -366,15 +424,15 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
/*
* Target is the new value of total_mrs the user requests, however we
- * cannot free MRs that are in use. Compute the target value for
- * available_mrs.
+ * cannot free MRs that are in use. Compute the target value for stored
+ * mkeys.
*/
- spin_lock_irq(&ent->lock);
- if (target < ent->total_mrs - ent->available_mrs) {
+ xa_lock_irq(&ent->mkeys);
+ if (target < ent->in_use) {
err = -EINVAL;
goto err_unlock;
}
- target = target - (ent->total_mrs - ent->available_mrs);
+ target = target - ent->in_use;
if (target < ent->limit || target > ent->limit*2) {
err = -EINVAL;
goto err_unlock;
@@ -382,12 +440,12 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
err = resize_available_mrs(ent, target, false);
if (err)
goto err_unlock;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
return count;
err_unlock:
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
return err;
}
@@ -398,7 +456,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
+ err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
if (err < 0)
return err;
@@ -427,10 +485,10 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
* Upon set we immediately fill the cache to high water mark implied by
* the limit.
*/
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
ent->limit = var;
err = resize_available_mrs(ent, 0, true);
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
if (err)
return err;
return count;
@@ -457,17 +515,17 @@ static const struct file_operations limit_fops = {
.read = limit_read,
};
-static bool someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
{
unsigned int i;
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
struct mlx5_cache_ent *ent = &cache->ent[i];
bool ret;
- spin_lock_irq(&ent->lock);
- ret = ent->available_mrs < ent->limit;
- spin_unlock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
+ ret = ent->stored < ent->limit;
+ xa_unlock_irq(&ent->mkeys);
if (ret)
return true;
}
@@ -481,26 +539,26 @@ static bool someone_adding(struct mlx5_mr_cache *cache)
*/
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
- lockdep_assert_held(&ent->lock);
+ lockdep_assert_held(&ent->mkeys.xa_lock);
if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
return;
- if (ent->available_mrs < ent->limit) {
+ if (ent->stored < ent->limit) {
ent->fill_to_high_water = true;
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->fill_to_high_water &&
- ent->available_mrs + ent->pending < 2 * ent->limit) {
+ ent->reserved < 2 * ent->limit) {
/*
* Once we start populating due to hitting a low water mark
* continue until we pass the high water mark.
*/
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
- } else if (ent->available_mrs == 2 * ent->limit) {
+ } else if (ent->stored == 2 * ent->limit) {
ent->fill_to_high_water = false;
- } else if (ent->available_mrs > 2 * ent->limit) {
+ } else if (ent->stored > 2 * ent->limit) {
/* Queue deletion of excess entries */
ent->fill_to_high_water = false;
- if (ent->pending)
+ if (ent->stored != ent->reserved)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(1000));
else
@@ -511,25 +569,24 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
int err;
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
- if (ent->fill_to_high_water &&
- ent->available_mrs + ent->pending < 2 * ent->limit &&
+ if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
!READ_ONCE(dev->fill_delay)) {
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
err = add_keys(ent, 1);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
if (err) {
/*
- * EAGAIN only happens if pending is positive, so we
- * will be rescheduled from reg_mr_callback(). The only
+ * EAGAIN only happens if there are pending MRs, so we
+ * will be rescheduled when storing them. The only
* failure path here is ENOMEM.
*/
if (err != -EAGAIN) {
@@ -541,7 +598,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
msecs_to_jiffies(1000));
}
}
- } else if (ent->available_mrs > 2 * ent->limit) {
+ } else if (ent->stored > 2 * ent->limit) {
bool need_delay;
/*
@@ -556,11 +613,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
* the garbage collection work to try to run in next cycle, in
* order to free CPU resources to other tasks.
*/
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
need_delay = need_resched() || someone_adding(cache) ||
!time_after(jiffies,
READ_ONCE(cache->last_add) + 300 * HZ);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
if (need_delay) {
@@ -571,7 +628,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
queue_adjust_cache_locked(ent);
}
out:
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
}
static void delayed_cache_work_func(struct work_struct *work)
@@ -587,73 +644,59 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
int access_flags)
{
struct mlx5_ib_mr *mr;
+ int err;
- /* Matches access in alloc_cache_mr() */
if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
return ERR_PTR(-EOPNOTSUPP);
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use++;
+
+ if (!ent->stored) {
queue_adjust_cache_locked(ent);
ent->miss++;
- spin_unlock_irq(&ent->lock);
- mr = create_cache_mr(ent);
- if (IS_ERR(mr))
- return mr;
+ xa_unlock_irq(&ent->mkeys);
+ err = create_cache_mkey(ent, &mr->mmkey.key);
+ if (err) {
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use--;
+ xa_unlock_irq(&ent->mkeys);
+ kfree(mr);
+ return ERR_PTR(err);
+ }
} else {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
+ mr->mmkey.key = pop_stored_mkey(ent);
queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
-
- mlx5_clear_mr(mr);
+ xa_unlock_irq(&ent->mkeys);
}
+ mr->mmkey.cache_ent = ent;
+ mr->mmkey.type = MLX5_MKEY_MR;
+ init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
-static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
-{
- struct mlx5_cache_ent *ent = mr->cache_ent;
-
- WRITE_ONCE(dev->cache.last_add, jiffies);
- spin_lock_irq(&ent->lock);
- list_add_tail(&mr->list, &ent->head);
- ent->available_mrs++;
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
-}
-
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_ib_mr *tmp_mr;
- struct mlx5_ib_mr *mr;
- LIST_HEAD(del_list);
+ u32 mkey;
cancel_delayed_work(&ent->dwork);
- while (1) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
- break;
- }
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_move(&mr->list, &del_list);
- ent->available_mrs--;
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
- }
-
- list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
- list_del(&mr->list);
- kfree(mr);
+ xa_lock_irq(&ent->mkeys);
+ while (ent->stored) {
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
}
+ xa_unlock_irq(&ent->mkeys);
}
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
if (!mlx5_debugfs_root || dev->is_rep)
return;
@@ -662,9 +705,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
dev->cache.root = NULL;
}
-static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
struct dentry *dir;
int i;
@@ -674,13 +717,13 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
sprintf(ent->name, "%d", ent->order);
dir = debugfs_create_dir(ent->name, cache->root);
debugfs_create_file("size", 0600, dir, ent, &size_fops);
debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
- debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
+ debugfs_create_ulong("cur", 0400, dir, &ent->stored);
debugfs_create_u32("miss", 0600, dir, &ent->miss);
}
}
@@ -692,9 +735,9 @@ static void delay_time_func(struct timer_list *t)
WRITE_ONCE(dev->fill_delay, 0);
}
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int i;
@@ -707,22 +750,21 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
timer_setup(&dev->delay_timer, delay_time_func, 0);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
- INIT_LIST_HEAD(&ent->head);
- spin_lock_init(&ent->lock);
+ xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
ent->order = i + 2;
ent->dev = dev;
ent->limit = 0;
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
- if (i > MR_CACHE_LAST_STD_ENTRY) {
- mlx5_odp_init_mr_cache_entry(ent);
+ if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+ mlx5_odp_init_mkey_cache_entry(ent);
continue;
}
- if (ent->order > mr_cache_max_order(dev))
+ if (ent->order > mkey_cache_max_order(dev))
continue;
ent->page = PAGE_SHIFT;
@@ -734,36 +776,36 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
}
- mlx5_mr_cache_debugfs_init(dev);
+ mlx5_mkey_cache_debugfs_init(dev);
return 0;
}
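
[Editor's note] Each entry's xarray is initialized with XA_FLAGS_LOCK_IRQ because its internal spinlock now stands in for the deleted ent->lock, guarding stored/reserved/in_use as well as the slots themselves; the flag keeps lockdep satisfied with the xa_lock_irq() discipline used throughout. The pattern in isolation (sketch, field names assumed):

	#include <linux/xarray.h>

	struct ent {
		struct xarray mkeys;
		unsigned long stored;
	};

	static void ent_init(struct ent *e)
	{
		/* One lock for both the slots and the bookkeeping counters */
		xa_init_flags(&e->mkeys, XA_FLAGS_LOCK_IRQ);
	}

	static bool ent_has_stored(struct ent *e)
	{
		bool ret;

		xa_lock_irq(&e->mkeys);
		ret = e->stored != 0;
		xa_unlock_irq(&e->mkeys);
		return ret;
	}
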
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{
unsigned int i;
if (!dev->cache.wq)
return 0;
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
struct mlx5_cache_ent *ent = &dev->cache.ent[i];
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
ent->disabled = true;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
cancel_delayed_work_sync(&ent->dwork);
}
- mlx5_mr_cache_debugfs_cleanup(dev);
+ mlx5_mkey_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
@@ -830,22 +872,22 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
return (npages + 1) / 2;
}
-static int mr_cache_max_order(struct mlx5_ib_dev *dev)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
- return MR_CACHE_LAST_STD_ENTRY + 2;
+ return MKEY_CACHE_LAST_STD_ENTRY + 2;
return MLX5_MAX_UMR_SHIFT;
}
-static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
- unsigned int order)
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+ unsigned int order)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
if (order < cache->ent[0].order)
return &cache->ent[0];
order = order - cache->ent[0].order;
- if (order > MR_CACHE_LAST_STD_ENTRY)
+ if (order > MKEY_CACHE_LAST_STD_ENTRY)
return NULL;
return &cache->ent[order];
}
@@ -888,7 +930,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
0, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
- ent = mr_cache_ent_from_order(
+ ent = mkey_cache_ent_from_order(
dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
/*
* Matches access in alloc_cache_mr(). If the MR can't come from the
@@ -1320,7 +1362,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
/* We only track the allocated sizes of MRs from the cache */
- if (!mr->cache_ent)
+ if (!mr->mmkey.cache_ent)
return false;
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
return false;
@@ -1329,7 +1371,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
if (WARN_ON(!*page_size))
return false;
- return (1ULL << mr->cache_ent->order) >=
+ return (1ULL << mr->mmkey.cache_ent->order) >=
ib_umem_num_dma_blocks(new_umem, *page_size);
}
@@ -1570,15 +1612,17 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
/* Stop DMA */
- if (mr->cache_ent) {
- if (mlx5r_umr_revoke_mr(mr)) {
- spin_lock_irq(&mr->cache_ent->lock);
- mr->cache_ent->total_mrs--;
- spin_unlock_irq(&mr->cache_ent->lock);
- mr->cache_ent = NULL;
- }
+ if (mr->mmkey.cache_ent) {
+ xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ mr->mmkey.cache_ent->in_use--;
+ xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
+
+ if (mlx5r_umr_revoke_mr(mr) ||
+ push_mkey(mr->mmkey.cache_ent, false,
+ xa_mk_value(mr->mmkey.key)))
+ mr->mmkey.cache_ent = NULL;
}
- if (!mr->cache_ent) {
+ if (!mr->mmkey.cache_ent) {
rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
if (rc)
return rc;
@@ -1595,12 +1639,10 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
mlx5_ib_free_odp_mr(mr);
}
- if (mr->cache_ent) {
- mlx5_mr_cache_free(dev, mr);
- } else {
+ if (!mr->mmkey.cache_ent)
mlx5_free_priv_descs(mr);
- kfree(mr);
- }
+
+ kfree(mr);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 84da5674e1ab..e305bf1dc6c2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1588,7 +1588,7 @@ mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
return err;
}
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
{
if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return;
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 3a48364c0918..e00b94d1b1ea 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -176,6 +176,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
dev->umrc.pd = pd;
sema_init(&dev->umrc.sem, MAX_UMR_WR);
+ mutex_init(&dev->umrc.lock);
return 0;
@@ -195,6 +196,31 @@ void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
ib_dealloc_pd(dev->umrc.pd);
}
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err)
+ goto err;
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
struct mlx5r_umr_wqe *wqe, bool with_data)
{
@@ -231,7 +257,7 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
id.ib_cqe = cqe;
mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
- MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR);
+ MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
mlx5r_ring_db(qp, 1, ctrl);
@@ -270,17 +296,49 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
mlx5r_umr_init_context(&umr_context);
down(&umrc->sem);
- err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
- with_data);
- if (err)
- mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
- else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "reg umr failed (%u)\n",
- umr_context.status);
+ while (true) {
+ mutex_lock(&umrc->lock);
+ if (umrc->state == MLX5_UMR_STATE_ERR) {
+ mutex_unlock(&umrc->lock);
err = -EFAULT;
+ break;
+ }
+
+ if (umrc->state == MLX5_UMR_STATE_RECOVER) {
+ mutex_unlock(&umrc->lock);
+ usleep_range(3000, 5000);
+ continue;
+ }
+
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
+ with_data);
+ mutex_unlock(&umrc->lock);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
+ err);
+ break;
}
+
+ wait_for_completion(&umr_context.done);
+
+ if (umr_context.status == IB_WC_SUCCESS)
+ break;
+
+ if (umr_context.status == IB_WC_WR_FLUSH_ERR)
+ continue;
+
+ WARN_ON_ONCE(1);
+ mlx5_ib_warn(dev,
+ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
+ umr_context.status);
+ mutex_lock(&umrc->lock);
+ err = mlx5r_umr_recover(dev);
+ mutex_unlock(&umrc->lock);
+ if (err)
+ mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
+ err);
+ err = -EFAULT;
+ break;
}
up(&umrc->sem);
return err;
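
[Editor's note] The loop above gives mlx5r_umr_post_send_wait() three distinct outcomes per iteration: success; a flushed WQE (IB_WC_WR_FLUSH_ERR), which is simply reposted once recovery has moved the QP back to RTS; and any other bad completion, which triggers mlx5r_umr_recover() and fails the caller. A runnable userspace model of that state machine (the names and canned completion results are illustrative, not the driver's API):

	#include <stdio.h>
	#include <pthread.h>

	enum state { ACTIVE, RECOVER, ERR };
	enum wc { WC_SUCCESS, WC_FLUSH_ERR, WC_FATAL };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static enum state state = ACTIVE;

	/* Pretend the first post gets flushed by a QP error, then succeeds. */
	static enum wc post_and_wait(int attempt)
	{
		return attempt == 0 ? WC_FLUSH_ERR : WC_SUCCESS;
	}

	static int submit(void)
	{
		int attempt = 0;

		for (;;) {
			pthread_mutex_lock(&lock);
			if (state == ERR) {	/* recovery already failed */
				pthread_mutex_unlock(&lock);
				return -1;
			}
			if (state == RECOVER) {	/* QP mid reset->RTS */
				pthread_mutex_unlock(&lock);
				continue;	/* the driver sleeps here */
			}
			pthread_mutex_unlock(&lock);

			switch (post_and_wait(attempt++)) {
			case WC_SUCCESS:
				return 0;
			case WC_FLUSH_ERR:
				continue;	/* repost the flushed WQE */
			default:
				pthread_mutex_lock(&lock);
				state = ERR;	/* stands in for recover() */
				pthread_mutex_unlock(&lock);
				return -1;
			}
		}
	}

	int main(void)
	{
		printf("submit() = %d\n", submit());
		return 0;
	}
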
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 03ed7c0fae50..d745ce9dc88a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3084,7 +3084,7 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
else
DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
- goto err0;
+ goto err1;
}
/* Index only, 18 bit long, lkey = itid << 8 | key */
@@ -3108,7 +3108,7 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
if (rc) {
DP_ERR(dev, "roce register tid returned an error %d\n", rc);
- goto err1;
+ goto err2;
}
mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
@@ -3117,8 +3117,10 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
return mr;
-err1:
+err2:
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
err0:
kfree(mr);
return ERR_PTR(rc);
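
[Editor's note] The relabeling makes the error ladder unwind in exact reverse order of acquisition: a failed rdma_alloc_tid() now frees the PBL table it used to leak, and a failed rdma_register_tid() frees both the TID and the PBL. A standalone sketch of the idiom (resource names illustrative; register_tid() fails to exercise the unwind):

	#include <stdio.h>

	static int alloc_mr(void)      { return 0; }
	static int alloc_pbl(void)     { return 0; }
	static int alloc_tid(void)     { return 0; }
	static int register_tid(void)  { return -1; }
	static void free_tid(void)     { puts("free tid"); }
	static void free_pbl(void)     { puts("free pbl"); }
	static void free_mr(void)      { puts("free mr"); }

	static int qedr_style_alloc(void)
	{
		if (alloc_mr())
			return -1;
		if (alloc_pbl())
			goto err0;
		if (alloc_tid())
			goto err1;	/* previously err0: leaked the PBL */
		if (register_tid())
			goto err2;	/* previously err1: also leaked the PBL */
		return 0;

	err2:
		free_tid();		/* unwind strictly in reverse order */
	err1:
		free_pbl();
	err0:
		free_mr();
		return -1;
	}

	int main(void)
	{
		return qedr_style_alloc() ? 1 : 0;
	}
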
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index b37b1c6d35c6..26c615772be3 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -321,7 +321,7 @@ struct qib_verbs_txreq {
* These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
* negotiation) are used for the 3rd argument to path_f_set_ib_cfg
* with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
- * are also the the possible values for qib_link_speed_enabled and active
+ * are also the possible values for qib_link_speed_enabled and active
* The values were chosen to match values used within the IB spec.
*/
#define QIB_IB_SDR 1
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index aa290928cf96..3937144b2ae5 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -153,7 +153,7 @@ static int qib_get_base_info(struct file *fp, void __user *ubase,
kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
/*
* for this use, may be cfgctxts summed over all chips that
- * are are configured and present
+ * are configured and present
*/
kinfo->spi_nctxts = dd->cfgctxts;
/* unit (chip/board) our context is on */
@@ -851,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
ret = -EPERM;
goto bail;
}
- /* don't allow them to later change to writeable with mprotect */
+ /* don't allow them to later change to writable with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
start = vma->vm_start;
@@ -941,7 +941,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
goto bail;
}
/*
- * Don't allow permission to later change to writeable
+ * Don't allow permission to later change to writable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 37b628a162e0..6af57067c32e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -58,7 +58,7 @@ static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
/*
* This file contains almost all the chip-specific register information and
* access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
- * exception of SerDes support, which in in qib_sd7220.c.
+ * exception of SerDes support, which is in qib_sd7220.c.
*/
/* Below uses machine-generated qib_chipnum_regs.h file */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ceed302cf6a0..6861c6384f18 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2850,9 +2850,9 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
qib_7322_free_irq(dd);
kfree(dd->cspec->cntrs);
- kfree(dd->cspec->sendchkenable);
- kfree(dd->cspec->sendgrhchk);
- kfree(dd->cspec->sendibchk);
+ bitmap_free(dd->cspec->sendchkenable);
+ bitmap_free(dd->cspec->sendgrhchk);
+ bitmap_free(dd->cspec->sendibchk);
kfree(dd->cspec->msix_entries);
for (i = 0; i < dd->num_pports; i++) {
unsigned long flags;
@@ -6383,18 +6383,11 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
features = qib_7322_boardname(dd);
/* now that piobcnt2k and 4k set, we can allocate these */
- sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
- NUM_VL15_BUFS + BITS_PER_LONG - 1;
- sbufcnt /= BITS_PER_LONG;
- dd->cspec->sendchkenable =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
- GFP_KERNEL);
- dd->cspec->sendgrhchk =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
- GFP_KERNEL);
- dd->cspec->sendibchk =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
- GFP_KERNEL);
+ sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+
+ dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL);
+ dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
+ dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
!dd->cspec->sendibchk) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index d1a72e89e297..45211008449f 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1106,8 +1106,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
if (!qib_cpulist_count) {
u32 count = num_online_cpus();
- qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
- GFP_KERNEL);
+ qib_cpulist = bitmap_zalloc(count, GFP_KERNEL);
if (qib_cpulist)
qib_cpulist_count = count;
}
@@ -1279,7 +1278,7 @@ static void __exit qib_ib_cleanup(void)
#endif
qib_cpulist_count = 0;
- kfree(qib_cpulist);
+ bitmap_free(qib_cpulist);
WARN_ON(!xa_empty(&qib_dev_table));
qib_dev_cleanup();
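
[Editor's note] bitmap_zalloc()/bitmap_free() take a size in bits and do the BITS_TO_LONGS() rounding internally, which is why the sbufcnt computation above loses its BITS_PER_LONG fudge and why qib_cpulist no longer needs kcalloc(). The pattern in isolation (kernel-flavored sketch, names illustrative):

	#include <linux/bitmap.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	static unsigned long *cpu_seen;

	static int cpu_seen_init(unsigned int nr_cpus)
	{
		/* Sized in bits; rounds up to whole longs internally,
		 * replacing kcalloc(BITS_TO_LONGS(nr_cpus), sizeof(long), ...).
		 */
		cpu_seen = bitmap_zalloc(nr_cpus, GFP_KERNEL);
		return cpu_seen ? 0 : -ENOMEM;
	}

	static void cpu_seen_exit(void)
	{
		bitmap_free(cpu_seen);	/* kfree() underneath, NULL-safe */
		cpu_seen = NULL;
	}
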
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 81b810d006c0..1dc3ccf0cf1f 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -587,7 +587,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
/* Need to release */
u64 pollval;
/*
- * The only writeable bits are the request and CS.
+ * The only writable bits are the request and CS.
* Both should be clear
*/
u64 newval = 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index e212929369df..67a1b4562dc2 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -482,7 +482,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
if (err)
goto out_free_dev;
- if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev));
err = -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index da3a398053b8..fb0c008af78c 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -114,6 +114,8 @@ void retransmit_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+ pr_debug("%s: fired for qp#%d\n", __func__, qp->elem.index);
+
if (qp->valid) {
qp->comp.timeout = 1;
rxe_run_task(&qp->comp.task, 1);
@@ -560,17 +562,16 @@ int rxe_completer(void *arg)
struct sk_buff *skb = NULL;
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;
- int ret = 0;
+ int ret;
if (!rxe_get(qp))
return -EAGAIN;
- if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
- qp->req.state == QP_STATE_RESET) {
+ if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
+ qp->comp.state == QP_STATE_RESET) {
rxe_drain_resp_pkts(qp, qp->valid &&
- qp->req.state == QP_STATE_ERROR);
- ret = -EAGAIN;
- goto done;
+ qp->comp.state == QP_STATE_ERROR);
+ goto exit;
}
if (qp->comp.timeout) {
@@ -580,10 +581,8 @@ int rxe_completer(void *arg)
qp->comp.timeout_retry = 0;
}
- if (qp->req.need_retry) {
- ret = -EAGAIN;
- goto done;
- }
+ if (qp->req.need_retry)
+ goto exit;
state = COMPST_GET_ACK;
@@ -676,8 +675,7 @@ int rxe_completer(void *arg)
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
- ret = -EAGAIN;
- goto done;
+ goto exit;
case COMPST_ERROR_RETRY:
/* we come here if the retry timer fired and we did
@@ -689,10 +687,8 @@ int rxe_completer(void *arg)
*/
/* there is nothing to retry in this case */
- if (!wqe || (wqe->state == wqe_state_posted)) {
- ret = -EAGAIN;
- goto done;
- }
+ if (!wqe || (wqe->state == wqe_state_posted))
+ goto exit;
/* if we've started a retry, don't start another
* retry sequence, unless this is a timeout.
@@ -730,18 +726,21 @@ int rxe_completer(void *arg)
break;
case COMPST_RNR_RETRY:
+ /* we come here if we received an RNR NAK */
if (qp->comp.rnr_retry > 0) {
if (qp->comp.rnr_retry != 7)
qp->comp.rnr_retry--;
- qp->req.need_retry = 1;
+ /* don't start a retry flow until the
+ * rnr timer has fired
+ */
+ qp->req.wait_for_rnr_timer = 1;
pr_debug("qp#%d set rnr nak timer\n",
qp_num(qp));
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
- ret = -EAGAIN;
- goto done;
+ goto exit;
} else {
rxe_counter_inc(rxe,
RXE_CNT_RNR_RETRY_EXCEEDED);
@@ -754,12 +753,20 @@ int rxe_completer(void *arg)
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe);
rxe_qp_error(qp);
- ret = -EAGAIN;
- goto done;
+ goto exit;
}
}
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_completer
+ */
done:
+ ret = 0;
+ goto out;
+exit:
+ ret = -EAGAIN;
+out:
if (pkt)
free_pkt(pkt);
rxe_put(qp);
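
[Editor's note] After this rework the function has exactly two ways out: done (ret = 0, so rxe_do_task() calls back in for another iteration) and exit (ret = -EAGAIN, ending the tasklet), with out carrying the shared free_pkt()/rxe_put() cleanup. A minimal standalone model of the convention:

	#include <errno.h>
	#include <stdio.h>

	/* Toy model of the done/exit/out labels above. */
	static int completer(int have_work)
	{
		int ret;

		if (!have_work)
			goto exit;	/* nothing to do: stop the tasklet */

		/* ... process one completion ... */
		goto done;

	done:
		ret = 0;		/* caller keeps iterating */
		goto out;
	exit:
		ret = -EAGAIN;		/* caller ends its loop */
	out:
		/* shared cleanup lives here exactly once */
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", completer(1), completer(0));
		return 0;
	}
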
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 642b52539ac3..b1a0ab3cd4bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -19,16 +19,16 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
}
if (cqe > rxe->attr.max_cqe) {
- pr_warn("cqe(%d) > max_cqe(%d)\n",
- cqe, rxe->attr.max_cqe);
+ pr_debug("cqe(%d) > max_cqe(%d)\n",
+ cqe, rxe->attr.max_cqe);
goto err1;
}
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
- pr_warn("cqe(%d) < current # elements in queue (%d)",
- cqe, count);
+ pr_debug("cqe(%d) < current # elements in queue (%d)",
+ cqe, count);
goto err1;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0e022ae1b8a5..22f6cc31d1d6 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -77,9 +77,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
-int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_elem *elem);
@@ -145,7 +144,7 @@ static inline int rcv_wqe_size(int max_sge)
max_sge * sizeof(struct ib_sge);
}
-void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
+void free_rd_atomic_resource(struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index fc3942e04a1f..850b80f5ad8b 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,7 +24,7 @@ u8 rxe_get_next_key(u32 last_key)
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
- struct rxe_map_set *set = mr->cur_map_set;
+
switch (mr->type) {
case IB_MR_TYPE_DMA:
@@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < set->iova || length > set->length ||
- iova > set->iova + set->length - length)
+ if (iova < mr->iova || length > mr->length ||
+ iova > mr->iova + mr->length - length)
return -EFAULT;
return 0;
@@ -65,89 +65,41 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set)
-{
- int i;
-
- for (i = 0; i < num_map; i++)
- kfree(set->map[i]);
-
- kfree(set->map);
- kfree(set);
-}
-
-static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
int i;
- struct rxe_map_set *set;
+ int num_map;
+ struct rxe_map **map = mr->map;
- set = kmalloc(sizeof(*set), GFP_KERNEL);
- if (!set)
- goto err_out;
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
- set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL);
- if (!set->map)
- goto err_free_set;
+ mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mr->map)
+ goto err1;
for (i = 0; i < num_map; i++) {
- set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL);
- if (!set->map[i])
- goto err_free_map;
+ mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mr->map[i])
+ goto err2;
}
- *setp = set;
-
- return 0;
-
-err_free_map:
- for (i--; i >= 0; i--)
- kfree(set->map[i]);
-
- kfree(set->map);
-err_free_set:
- kfree(set);
-err_out:
- return -ENOMEM;
-}
-
-/**
- * rxe_mr_alloc() - Allocate memory map array(s) for MR
- * @mr: Memory region
- * @num_buf: Number of buffer descriptors to support
- * @both: If non zero allocate both mr->map and mr->next_map
- * else just allocate mr->map. Used for fast MRs
- *
- * Return: 0 on success else an error
- */
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
-{
- int ret;
- int num_map;
-
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
mr->map_mask = RXE_BUF_PER_MAP - 1;
+
mr->num_buf = num_buf;
- mr->max_buf = num_map * RXE_BUF_PER_MAP;
mr->num_map = num_map;
-
- ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
- if (ret)
- return -ENOMEM;
-
- if (both) {
- ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
- if (ret)
- goto err_free;
- }
+ mr->max_buf = num_map * RXE_BUF_PER_MAP;
return 0;
-err_free:
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
- mr->cur_map_set = NULL;
+err2:
+ for (i--; i >= 0; i--)
+ kfree(mr->map[i]);
+
+ kfree(mr->map);
+err1:
return -ENOMEM;
}
@@ -164,7 +116,6 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
- struct rxe_map_set *set;
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
struct ib_umem *umem;
@@ -172,6 +123,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
+ int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
@@ -185,20 +137,18 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
rxe_mr_init(access, mr);
- err = rxe_mr_alloc(mr, num_buf, 0);
+ err = rxe_mr_alloc(mr, num_buf);
if (err) {
pr_warn("%s: Unable to allocate memory for map\n",
__func__);
goto err_release_umem;
}
- set = mr->cur_map_set;
- set->page_shift = PAGE_SHIFT;
- set->page_mask = PAGE_SIZE - 1;
-
- num_buf = 0;
- map = set->map;
+ mr->page_shift = PAGE_SHIFT;
+ mr->page_mask = PAGE_SIZE - 1;
+ num_buf = 0;
+ map = mr->map;
if (length > 0) {
buf = map[0]->buf;
@@ -214,29 +164,33 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
pr_warn("%s: Unable to get virtual address\n",
__func__);
err = -ENOMEM;
- goto err_release_umem;
+ goto err_cleanup_map;
}
buf->addr = (uintptr_t)vaddr;
buf->size = PAGE_SIZE;
num_buf++;
buf++;
+
}
}
mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
+ mr->length = length;
+ mr->iova = iova;
+ mr->va = start;
+ mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_USER;
- set->length = length;
- set->iova = iova;
- set->va = start;
- set->offset = ib_umem_offset(umem);
-
return 0;
+err_cleanup_map:
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
+ kfree(mr->map);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -250,7 +204,7 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
/* always allow remote access for FMRs */
rxe_mr_init(IB_ACCESS_REMOTE, mr);
- err = rxe_mr_alloc(mr, max_pages, 1);
+ err = rxe_mr_alloc(mr, max_pages);
if (err)
goto err1;
@@ -268,24 +222,21 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- struct rxe_map_set *set = mr->cur_map_set;
- size_t offset = iova - set->iova + set->offset;
+ size_t offset = iova - mr->iova + mr->offset;
int map_index;
int buf_index;
u64 length;
- struct rxe_map *map;
- if (likely(set->page_shift)) {
- *offset_out = offset & set->page_mask;
- offset >>= set->page_shift;
+ if (likely(mr->page_shift)) {
+ *offset_out = offset & mr->page_mask;
+ offset >>= mr->page_shift;
*n_out = offset & mr->map_mask;
*m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
while (offset >= length) {
offset -= length;
@@ -295,8 +246,7 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
map_index++;
buf_index = 0;
}
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
}
*m_out = map_index;
@@ -317,7 +267,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
goto out;
}
- if (!mr->cur_map_set) {
+ if (!mr->map) {
addr = (void *)(uintptr_t)iova;
goto out;
}
@@ -330,13 +280,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mr->cur_map_set->map[m]->buf[n].size) {
+ if (offset + length > mr->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
out:
return addr;
@@ -372,7 +322,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return 0;
}
- WARN_ON_ONCE(!mr->cur_map_set);
+ WARN_ON_ONCE(!mr->map);
err = mr_check_range(mr, iova, length);
if (err) {
@@ -382,7 +332,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
lookup_iova(mr, iova, &m, &i, &offset);
- map = mr->cur_map_set->map + m;
+ map = mr->map + m;
buf = map[0]->buf + i;
while (length > 0) {
@@ -576,22 +526,22 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
return mr;
}
-int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_mr *mr;
int ret;
- mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
if (!mr) {
- pr_err("%s: No MR for rkey %#x\n", __func__, rkey);
+ pr_err("%s: No MR for key %#x\n", __func__, key);
ret = -EINVAL;
goto err;
}
- if (rkey != mr->rkey) {
- pr_err("%s: rkey (%#x) doesn't match mr->rkey (%#x)\n",
- __func__, rkey, mr->rkey);
+ if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {
+ pr_err("%s: wr key (%#x) doesn't match mr key (%#x)\n",
+ __func__, key, (mr->rkey ? mr->rkey : mr->lkey));
ret = -EINVAL;
goto err_drop_ref;
}
@@ -628,9 +578,8 @@ err:
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
- u32 key = wqe->wr.wr.reg.key & 0xff;
+ u32 key = wqe->wr.wr.reg.key;
u32 access = wqe->wr.wr.reg.access;
- struct rxe_map_set *set;
/* user can only register MR in free state */
if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
@@ -646,36 +595,19 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
return -EINVAL;
}
+ /* user is only allowed to change key portion of l/rkey */
+ if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
+ pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n",
+ __func__, key, mr->lkey);
+ return -EINVAL;
+ }
+
mr->access = access;
- mr->lkey = (mr->lkey & ~0xff) | key;
- mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
+ mr->lkey = key;
+ mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
+ mr->iova = wqe->wr.wr.reg.mr->iova;
mr->state = RXE_MR_STATE_VALID;
- set = mr->cur_map_set;
- mr->cur_map_set = mr->next_map_set;
- mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
- mr->next_map_set = set;
-
- return 0;
-}
-
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map_set *set = mr->next_map_set;
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(set->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = set->map[set->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP];
-
- buf->addr = addr;
- buf->size = ibmr->page_size;
- set->nbuf++;
-
return 0;
}
@@ -687,7 +619,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (atomic_read(&mr->num_mw) > 0)
return -EINVAL;
- rxe_put(mr);
+ rxe_cleanup(mr);
return 0;
}
@@ -695,14 +627,15 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+ int i;
rxe_put(mr_pd(mr));
-
ib_umem_release(mr->umem);
- if (mr->cur_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+ if (mr->map) {
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
- if (mr->next_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->next_map_set);
+ kfree(mr->map);
+ }
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 2e1fa844fabf..104993801a80 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -33,6 +33,8 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
+ rxe_finalize(mw);
+
return 0;
}
@@ -40,7 +42,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
- rxe_put(mw);
+ rxe_cleanup(mw);
return 0;
}
@@ -48,8 +50,6 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_mw *mw, struct rxe_mr *mr)
{
- u32 key = wqe->wr.wr.mw.rkey & 0xff;
-
if (mw->ibmw.type == IB_MW_TYPE_1) {
if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
pr_err_once(
@@ -87,11 +87,6 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
}
- if (unlikely(key == (mw->rkey & 0xff))) {
- pr_err_once("attempt to bind MW with same key\n");
- return -EINVAL;
- }
-
/* remaining checks only apply to a nonzero MR */
if (!mr)
return 0;
@@ -113,21 +108,21 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
(IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
!(mr->access & IB_ACCESS_LOCAL_WRITE))) {
pr_err_once(
- "attempt to bind an writeable MW to an MR without local write access\n");
+ "attempt to bind an Writable MW to an MR without local write access\n");
return -EINVAL;
}
/* C10-75 */
if (mw->access & IB_ZERO_BASED) {
- if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
pr_err_once(
"attempt to bind a ZB MW outside of the MR\n");
return -EINVAL;
}
} else {
- if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
+ if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
- (mr->cur_map_set->iova + mr->cur_map_set->length)))) {
+ (mr->iova + mr->length)))) {
pr_err_once(
"attempt to bind a VA MW outside of the MR\n");
return -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 568a7cbd13d4..86c7a8bf3cbb 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -105,6 +105,12 @@ enum rxe_device_param {
RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
+ /* Max number of iterations of each tasklet
+ * before yielding the cpu to let other
+ * work make progress
+ */
+ RXE_MAX_ITERATIONS = 1024,
+
/* Delay before calling arbiter timer */
RXE_NSEC_ARB_TIMER_DELAY = 200,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 19b14826385b..f50620f5a0a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -6,6 +6,7 @@
#include "rxe.h"
+#define RXE_POOL_TIMEOUT (200)
#define RXE_POOL_ALIGN (16)
static const struct rxe_type_info {
@@ -136,10 +137,14 @@ void *rxe_alloc(struct rxe_pool *pool)
elem->pool = pool;
elem->obj = obj;
kref_init(&elem->ref_cnt);
+ init_completion(&elem->complete);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ /* allocate index in array but leave pointer as NULL so it
+ * can't be looked up until rxe_finalize() is called
+ */
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
- if (err)
+ if (err < 0)
goto err_free;
return obj;
@@ -151,9 +156,11 @@ err_cnt:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable)
{
int err;
+ gfp_t gfp_flags;
if (WARN_ON(pool->type == RXE_TYPE_MR))
return -EINVAL;
@@ -164,10 +171,19 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
elem->pool = pool;
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
-
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
- &pool->next, GFP_KERNEL);
- if (err)
+ init_completion(&elem->complete);
+
+ /* AH objects are unique in that the create_ah verb
+ * can be called in atomic context. If the create_ah
+ * call is not sleepable use GFP_ATOMIC.
+ */
+ gfp_flags = sleepable ? GFP_KERNEL : GFP_ATOMIC;
+
+ if (sleepable)
+ might_sleep();
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+ &pool->next, gfp_flags);
+ if (err < 0)
goto err_cnt;
return 0;
@@ -181,16 +197,15 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
struct rxe_pool_elem *elem;
struct xarray *xa = &pool->xa;
- unsigned long flags;
void *obj;
- xa_lock_irqsave(xa, flags);
+ rcu_read_lock();
elem = xa_load(xa, index);
if (elem && kref_get_unless_zero(&elem->ref_cnt))
obj = elem->obj;
else
obj = NULL;
- xa_unlock_irqrestore(xa, flags);
+ rcu_read_unlock();
return obj;
}
@@ -198,17 +213,74 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
static void rxe_elem_release(struct kref *kref)
{
struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
- struct rxe_pool *pool = elem->pool;
- xa_erase(&pool->xa, elem->index);
+ complete(&elem->complete);
+}
+
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+{
+ struct rxe_pool *pool = elem->pool;
+ struct xarray *xa = &pool->xa;
+ static int timeout = RXE_POOL_TIMEOUT;
+ int ret, err = 0;
+ void *xa_ret;
+
+ if (sleepable)
+ might_sleep();
+
+ /* erase xarray entry to prevent looking up
+ * the pool elem from its index
+ */
+ xa_ret = xa_erase(xa, elem->index);
+ WARN_ON(xa_err(xa_ret));
+
+ /* if this is the last call to rxe_put complete the
+ * object. It is safe to touch obj->elem after this since
+ * it is freed below
+ */
+ __rxe_put(elem);
+
+ /* wait until all references to the object have been
+ * dropped before final object specific cleanup and
+ * return to rdma-core
+ */
+ if (sleepable) {
+ if (!completion_done(&elem->complete) && timeout) {
+ ret = wait_for_completion_timeout(&elem->complete,
+ timeout);
+
+ /* Shouldn't happen. There are still references to
+ * the object but, rather than deadlock, free the
+ * object or pass back to rdma-core.
+ */
+ if (WARN_ON(!ret))
+ err = -EINVAL;
+ }
+ } else {
+ unsigned long until = jiffies + timeout;
+
+ /* AH objects are unique in that the destroy_ah verb
+ * can be called in atomic context. This delay
+ * replaces the wait_for_completion call above
+ * when the destroy_ah call is not sleepable
+ */
+ while (!completion_done(&elem->complete) &&
+ time_before(jiffies, until))
+ mdelay(1);
+
+ if (WARN_ON(!completion_done(&elem->complete)))
+ err = -EINVAL;
+ }
if (pool->cleanup)
pool->cleanup(elem);
if (pool->type == RXE_TYPE_MR)
- kfree(elem->obj);
+ kfree_rcu(elem->obj);
atomic_dec(&pool->num_elem);
+
+ return err;
}
int __rxe_get(struct rxe_pool_elem *elem)
@@ -220,3 +292,11 @@ int __rxe_put(struct rxe_pool_elem *elem)
{
return kref_put(&elem->ref_cnt, rxe_elem_release);
}
+
+void __rxe_finalize(struct rxe_pool_elem *elem)
+{
+ void *xa_ret;
+
+ xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
+ WARN_ON(xa_err(xa_ret));
+}
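The pool changes combine three idioms: reserve an xarray index with a NULL entry so a half-built object can never be looked up, publish the pointer later with xa_store() (rxe_finalize), and on teardown erase the entry, drop the allocating reference, and wait on a completion fired from the kref release. A self-contained sketch of that lifecycle under hypothetical names — not the rxe code itself:

#include <linux/xarray.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
	struct completion done;
	struct rcu_head rcu;
	u32 index;
};

static DEFINE_XARRAY_ALLOC(pool_xa);

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, ref);

	complete(&o->done);
}

static struct obj *obj_create(void)
{
	static u32 next;
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	kref_init(&o->ref);
	init_completion(&o->done);
	/* reserve an index but keep the slot NULL: not yet visible */
	if (xa_alloc_cyclic(&pool_xa, &o->index, NULL, xa_limit_32b,
			    &next, GFP_KERNEL) < 0) {
		kfree(o);
		return NULL;
	}
	return o;
}

static void obj_finalize(struct obj *o)
{
	xa_store(&pool_xa, o->index, o, GFP_KERNEL);	/* publish */
}

static struct obj *obj_lookup(u32 index)
{
	struct obj *o;

	rcu_read_lock();
	o = xa_load(&pool_xa, index);
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;
	rcu_read_unlock();
	return o;
}

static void obj_destroy(struct obj *o)
{
	xa_erase(&pool_xa, o->index);		/* block new lookups */
	kref_put(&o->ref, obj_release);		/* drop the create ref */
	wait_for_completion(&o->done);		/* wait out other refs */
	kfree_rcu(o, rcu);			/* grace period for a racing xa_load() */
}

The deferred free matters: a reader can sit between xa_load() and kref_get_unless_zero() when the last reference drops, which is why the MR path above switches to kfree_rcu().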
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 0860660d65ec..9d83cb32092f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -24,6 +24,7 @@ struct rxe_pool_elem {
void *obj;
struct kref ref_cnt;
struct list_head list;
+ struct completion complete;
u32 index;
};
@@ -57,21 +58,28 @@ void rxe_pool_cleanup(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable);
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem, true)
+#define rxe_add_to_pool_ah(pool, obj, sleepable) __rxe_add_to_pool(pool, \
+ &(obj)->elem, sleepable)
/* lookup an indexed object from index. takes a reference on object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
int __rxe_get(struct rxe_pool_elem *elem);
-
#define rxe_get(obj) __rxe_get(&(obj)->elem)
int __rxe_put(struct rxe_pool_elem *elem);
-
#define rxe_put(obj) __rxe_put(&(obj)->elem)
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable);
+#define rxe_cleanup(obj) __rxe_cleanup(&(obj)->elem, true)
+#define rxe_cleanup_ah(obj, sleepable) __rxe_cleanup(&(obj)->elem, sleepable)
+
#define rxe_read(obj) kref_read(&(obj)->elem.ref_cnt)
+void __rxe_finalize(struct rxe_pool_elem *elem);
+#define rxe_finalize(obj) __rxe_finalize(&(obj)->elem)
+
#endif /* RXE_POOL_H */
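For a consumer, the resulting call pattern is create, initialize, finalize, and cleanup instead of a bare put — exactly the substitution the rxe_verbs.c hunks below make. A sketch of the intended usage, error handling elided:

	err = rxe_add_to_pool(&rxe->qp_pool, qp);	/* index reserved, object not yet visible */
	/* ... driver-specific initialization ... */
	rxe_finalize(qp);				/* publish: rxe_pool_get_index() now succeeds */

	/* teardown: unpublish, then wait for the last rxe_put() */
	rxe_cleanup(qp);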
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 22e9b85344c3..516bf9b95e48 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -120,17 +120,15 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
struct resp_res *res = &qp->resp.resources[i];
- free_rd_atomic_resource(qp, res);
+ free_rd_atomic_resource(res);
}
kfree(qp->resp.resources);
qp->resp.resources = NULL;
}
}
-void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
+void free_rd_atomic_resource(struct resp_res *res)
{
- if (res->type == RXE_ATOMIC_MASK)
- kfree_skb(res->atomic.skb);
res->type = 0;
}
@@ -142,7 +140,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
if (qp->resp.resources) {
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
res = &qp->resp.resources[i];
- free_rd_atomic_resource(qp, res);
+ free_rd_atomic_resource(res);
}
}
}
@@ -174,6 +172,14 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->state_lock);
+ spin_lock_init(&qp->req.task.state_lock);
+ spin_lock_init(&qp->resp.task.state_lock);
+ spin_lock_init(&qp->comp.task.state_lock);
+
+ spin_lock_init(&qp->sq.sq_lock);
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
atomic_set(&qp->ssn, 0);
atomic_set(&qp->skb_out, 0);
}
@@ -230,10 +236,10 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
QUEUE_TYPE_FROM_CLIENT);
qp->req.state = QP_STATE_RESET;
+ qp->comp.state = QP_STATE_RESET;
qp->req.opcode = -1;
qp->comp.opcode = -1;
- spin_lock_init(&qp->sq.sq_lock);
skb_queue_head_init(&qp->req_pkts);
rxe_init_task(rxe, &qp->req.task, qp,
@@ -284,9 +290,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
}
}
- spin_lock_init(&qp->rq.producer_lock);
- spin_lock_init(&qp->rq.consumer_lock);
-
skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp,
@@ -490,6 +493,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
/* move qp to the reset state */
qp->req.state = QP_STATE_RESET;
+ qp->comp.state = QP_STATE_RESET;
qp->resp.state = QP_STATE_RESET;
/* let state machines reset themselves drain work and packet queues
@@ -507,6 +511,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
atomic_set(&qp->ssn, 0);
qp->req.opcode = -1;
qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
qp->req.noack_pkts = 0;
qp->resp.msn = 0;
qp->resp.opcode = -1;
@@ -552,6 +557,7 @@ void rxe_qp_error(struct rxe_qp *qp)
{
qp->req.state = QP_STATE_ERROR;
qp->resp.state = QP_STATE_ERROR;
+ qp->comp.state = QP_STATE_ERROR;
qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
@@ -689,6 +695,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
pr_debug("qp#%d state -> INIT\n", qp_num(qp));
qp->req.state = QP_STATE_INIT;
qp->resp.state = QP_STATE_INIT;
+ qp->comp.state = QP_STATE_INIT;
break;
case IB_QPS_RTR:
@@ -699,6 +706,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
case IB_QPS_RTS:
pr_debug("qp#%d state -> RTS\n", qp_num(qp));
qp->req.state = QP_STATE_READY;
+ qp->comp.state = QP_STATE_READY;
break;
case IB_QPS_SQD:
@@ -804,13 +812,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
if (qp->rq.queue)
rxe_queue_cleanup(qp->rq.queue);
- atomic_dec(&qp->scq->num_wq);
- if (qp->scq)
+ if (qp->scq) {
+ atomic_dec(&qp->scq->num_wq);
rxe_put(qp->scq);
+ }
- atomic_dec(&qp->rcq->num_wq);
- if (qp->rcq)
+ if (qp->rcq) {
+ atomic_dec(&qp->rcq->num_wq);
rxe_put(qp->rcq);
+ }
if (qp->pd)
rxe_put(qp->pd);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 6227112ef7a2..ed44042782fa 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -7,9 +7,6 @@
#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H
-/* for definition of shared struct rxe_queue_buf */
-#include <uapi/rdma/rdma_user_rxe.h>
-
/* Implements a simple circular buffer that is shared between user
* and the driver and can be resized. The requested element size is
* rounded up to a power of 2 and the number of elements in the buffer
@@ -53,6 +50,8 @@ enum queue_type {
QUEUE_TYPE_FROM_DRIVER,
};
+struct rxe_queue_buf;
+
struct rxe_queue {
struct rxe_dev *rxe;
struct rxe_queue_buf *buf;
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9d98237389cf..f63771207970 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -15,8 +15,7 @@ static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u32 opcode);
static inline void retry_first_write_send(struct rxe_qp *qp,
- struct rxe_send_wqe *wqe,
- unsigned int mask, int npsn)
+ struct rxe_send_wqe *wqe, int npsn)
{
int i;
@@ -83,7 +82,7 @@ static void req_retry(struct rxe_qp *qp)
if (mask & WR_WRITE_OR_SEND_MASK) {
npsn = (qp->comp.psn - wqe->first_psn) &
BTH_PSN_MASK;
- retry_first_write_send(qp, wqe, mask, npsn);
+ retry_first_write_send(qp, wqe, npsn);
}
if (mask & WR_READ_MASK) {
@@ -101,7 +100,11 @@ void rnr_nak_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
- pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
+ pr_debug("%s: fired for qp#%d\n", __func__, qp_num(qp));
+
+ /* request a send queue retry */
+ qp->req.need_retry = 1;
+ qp->req.wait_for_rnr_timer = 0;
rxe_run_task(&qp->req.task, 1);
}
@@ -161,16 +164,36 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
(wqe->state != wqe_state_processing)))
return NULL;
- if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
- (index != cons))) {
- qp->req.wait_fence = 1;
- return NULL;
- }
-
wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
return wqe;
}
+/**
+ * rxe_wqe_is_fenced - check if next wqe is fenced
+ * @qp: the queue pair
+ * @wqe: the next wqe
+ *
+ * Returns: 1 if wqe needs to wait
+ * 0 if wqe is ready to go
+ */
+static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ /* Local invalidate fence (LIF) see IBA 10.6.5.1
+ * Requires ALL previous operations on the send queue
+ * are complete. Make mandatory for the rxe driver.
+ */
+ if (wqe->wr.opcode == IB_WR_LOCAL_INV)
+ return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
+
+ /* Fence see IBA 10.8.3.3
+ * Requires that all previous read and atomic operations
+ * are complete.
+ */
+ return (wqe->wr.send_flags & IB_SEND_FENCE) &&
+ atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
+}
+
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
switch (opcode) {
@@ -581,9 +604,11 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->status = IB_WC_SUCCESS;
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
- if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- qp->sq_sig_type == IB_SIGNAL_ALL_WR)
- rxe_run_task(&qp->comp.task, 1);
+ /* There is no ack coming for local work requests
+ * which can lead to a deadlock. So go ahead and complete
+ * it now.
+ */
+ rxe_run_task(&qp->comp.task, 1);
return 0;
}
@@ -599,6 +624,7 @@ int rxe_requester(void *arg)
u32 payload;
int mtu;
int opcode;
+ int err;
int ret;
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;
@@ -609,10 +635,20 @@ int rxe_requester(void *arg)
if (!rxe_get(qp))
return -EAGAIN;
-next_wqe:
- if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+ if (unlikely(!qp->valid))
goto exit;
+ if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+ wqe = req_next_wqe(qp);
+ if (wqe)
+ /*
+ * Generate an error completion for error qp state
+ */
+ goto err;
+ else
+ goto exit;
+ }
+
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = queue_get_consumer(q,
QUEUE_TYPE_FROM_CLIENT);
@@ -620,10 +656,17 @@ next_wqe:
qp->req.need_rd_atomic = 0;
qp->req.wait_psn = 0;
qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
goto exit;
}
- if (unlikely(qp->req.need_retry)) {
+ /* we come here if the retransmit timer has fired
+ * or if the rnr timer has fired. If the retransmit
+ * timer fires while we are processing an RNR NAK,
+ * wait until the rnr timer has fired before starting
+ * the retry flow
+ */
+ if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
req_retry(qp);
qp->req.need_retry = 0;
}
@@ -632,12 +675,17 @@ next_wqe:
if (unlikely(!wqe))
goto exit;
+ if (rxe_wqe_is_fenced(qp, wqe)) {
+ qp->req.wait_fence = 1;
+ goto exit;
+ }
+
if (wqe->mask & WR_LOCAL_OP_MASK) {
- ret = rxe_do_local_ops(qp, wqe);
- if (unlikely(ret))
+ err = rxe_do_local_ops(qp, wqe);
+ if (unlikely(err))
goto err;
else
- goto next_wqe;
+ goto done;
}
if (unlikely(qp_type(qp) == IB_QPT_RC &&
@@ -685,9 +733,8 @@ next_wqe:
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- __rxe_do_task(&qp->comp.task);
- rxe_put(qp);
- return 0;
+ rxe_run_task(&qp->comp.task, 0);
+ goto done;
}
payload = mtu;
}
@@ -703,25 +750,29 @@ next_wqe:
if (unlikely(!av)) {
pr_err("qp#%d Failed no address vector\n", qp_num(qp));
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err_drop_ah;
+ goto err;
}
skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err_drop_ah;
+ if (ah)
+ rxe_put(ah);
+ goto err;
}
- ret = finish_packet(qp, av, wqe, &pkt, skb, payload);
- if (unlikely(ret)) {
+ err = finish_packet(qp, av, wqe, &pkt, skb, payload);
+ if (unlikely(err)) {
pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
- if (ret == -EFAULT)
+ if (err == -EFAULT)
wqe->status = IB_WC_LOC_PROT_ERR;
else
wqe->status = IB_WC_LOC_QP_OP_ERR;
kfree_skb(skb);
- goto err_drop_ah;
+ if (ah)
+ rxe_put(ah);
+ goto err;
}
if (ah)
@@ -736,13 +787,14 @@ next_wqe:
save_state(wqe, qp, &rollback_wqe, &rollback_psn);
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
- ret = rxe_xmit_packet(qp, &pkt, skb);
- if (ret) {
+
+ err = rxe_xmit_packet(qp, &pkt, skb);
+ if (err) {
qp->need_req_skb = 1;
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
- if (ret == -EAGAIN) {
+ if (err == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
goto exit;
}
@@ -753,16 +805,23 @@ next_wqe:
update_state(qp, &pkt);
- goto next_wqe;
-
-err_drop_ah:
- if (ah)
- rxe_put(ah);
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_requester
+ */
+done:
+ ret = 0;
+ goto out;
err:
+ /* update wqe_index for each wqe completion */
+ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
- __rxe_do_task(&qp->comp.task);
-
+ qp->req.state = QP_STATE_ERROR;
+ rxe_run_task(&qp->comp.task, 0);
exit:
+ ret = -EAGAIN;
+out:
rxe_put(qp);
- return -EAGAIN;
+
+ return ret;
}
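The rewritten exit paths encode a single contract with rxe_do_task(): returning 0 keeps the tasklet loop calling back into rxe_requester(), while any non-zero value ends the loop. Reduced to a skeleton with hypothetical helpers — a sketch of the contract, not the driver code:

static bool demo_have_work(void *arg);		/* hypothetical predicate */
static int demo_do_one_unit(void *arg);		/* hypothetical worker */

/* contract: 0 = call me again, non-zero = stop looping */
static int demo_work(void *arg)
{
	if (!demo_have_work(arg))
		return -EAGAIN;		/* nothing to do: stop */

	if (demo_do_one_unit(arg))
		return -EAGAIN;		/* error path: stop */

	return 0;			/* made progress: loop again */
}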
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index f4f6ee5d81fe..b36ec5c4d5e0 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -21,6 +21,7 @@ enum resp_states {
RESPST_CHK_RKEY,
RESPST_EXECUTE,
RESPST_READ_REPLY,
+ RESPST_ATOMIC_REPLY,
RESPST_COMPLETE,
RESPST_ACKNOWLEDGE,
RESPST_CLEANUP,
@@ -55,6 +56,7 @@ static char *resp_state_name[] = {
[RESPST_CHK_RKEY] = "CHK_RKEY",
[RESPST_EXECUTE] = "EXECUTE",
[RESPST_READ_REPLY] = "READ_REPLY",
+ [RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY",
[RESPST_COMPLETE] = "COMPLETE",
[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
[RESPST_CLEANUP] = "CLEANUP",
@@ -448,7 +450,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
if (rkey_is_mw(rkey)) {
mw = rxe_lookup_mw(qp, access, rkey);
if (!mw) {
- pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
+ pr_debug("%s: no MW matches rkey %#x\n",
+ __func__, rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -468,7 +471,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
} else {
mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
if (!mr) {
- pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
+ pr_debug("%s: no MR matches rkey %#x\n",
+ __func__, rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -549,49 +553,106 @@ out:
return rc;
}
+static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ int type)
+{
+ struct resp_res *res;
+ u32 pkts;
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ rxe_advance_resp_resource(qp);
+ free_rd_atomic_resource(res);
+
+ res->type = type;
+ res->replay = 0;
+
+ switch (type) {
+ case RXE_READ_MASK:
+ res->read.va = qp->resp.va + qp->resp.offset;
+ res->read.va_org = qp->resp.va + qp->resp.offset;
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+ res->first_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+ res->state = rdatm_res_state_new;
+ break;
+ case RXE_ATOMIC_MASK:
+ res->first_psn = pkt->psn;
+ res->last_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ break;
+ }
+
+ return res;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);
-static enum resp_states process_atomic(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+static enum resp_states atomic_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
{
u64 *vaddr;
enum resp_states ret;
struct rxe_mr *mr = qp->resp.mr;
+ struct resp_res *res = qp->resp.res;
+ u64 value;
- if (mr->state != RXE_MR_STATE_VALID) {
- ret = RESPST_ERR_RKEY_VIOLATION;
- goto out;
+ if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
+ qp->resp.res = res;
}
- vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));
+ if (!res->replay) {
+ if (mr->state != RXE_MR_STATE_VALID) {
+ ret = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
- /* check vaddr is 8 bytes aligned. */
- if (!vaddr || (uintptr_t)vaddr & 7) {
- ret = RESPST_ERR_MISALIGNED_ATOMIC;
- goto out;
- }
+ vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
+ sizeof(u64));
- spin_lock_bh(&atomic_ops_lock);
+ /* check vaddr is 8 bytes aligned. */
+ if (!vaddr || (uintptr_t)vaddr & 7) {
+ ret = RESPST_ERR_MISALIGNED_ATOMIC;
+ goto out;
+ }
- qp->resp.atomic_orig = *vaddr;
+ spin_lock_bh(&atomic_ops_lock);
+ res->atomic.orig_val = value = *vaddr;
- if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
- if (*vaddr == atmeth_comp(pkt))
- *vaddr = atmeth_swap_add(pkt);
- } else {
- *vaddr += atmeth_swap_add(pkt);
- }
+ if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == atmeth_comp(pkt))
+ value = atmeth_swap_add(pkt);
+ } else {
+ value += atmeth_swap_add(pkt);
+ }
+
+ *vaddr = value;
+ spin_unlock_bh(&atomic_ops_lock);
+
+ qp->resp.msn++;
- spin_unlock_bh(&atomic_ops_lock);
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
- ret = RESPST_NONE;
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+ }
+
+ ret = RESPST_ACKNOWLEDGE;
out:
return ret;
}
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt,
struct rxe_pkt_info *ack,
int opcode,
int payload,
@@ -629,7 +690,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
}
if (ack->mask & RXE_ATMACK_MASK)
- atmack_set_orig(ack, qp->resp.atomic_orig);
+ atmack_set_orig(ack, qp->resp.res->atomic.orig_val);
err = rxe_prepare(&qp->pri_av, ack, skb);
if (err) {
@@ -640,34 +701,6 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
return skb;
}
-static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
-{
- struct resp_res *res;
- u32 pkts;
-
- res = &qp->resp.resources[qp->resp.res_head];
- rxe_advance_resp_resource(qp);
- free_rd_atomic_resource(qp, res);
-
- res->type = RXE_READ_MASK;
- res->replay = 0;
- res->read.va = qp->resp.va + qp->resp.offset;
- res->read.va_org = qp->resp.va + qp->resp.offset;
- res->read.resid = qp->resp.resid;
- res->read.length = qp->resp.resid;
- res->read.rkey = qp->resp.rkey;
-
- pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
- res->first_psn = pkt->psn;
- res->cur_psn = pkt->psn;
- res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
-
- res->state = rdatm_res_state_new;
-
- return res;
-}
-
/**
* rxe_recheck_mr - revalidate MR from rkey and get a reference
* @qp: the qp
@@ -738,7 +771,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
struct rxe_mr *mr;
if (!res) {
- res = rxe_prepare_read_res(qp, req_pkt);
+ res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
qp->resp.res = res;
}
@@ -771,7 +804,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
payload = min_t(int, res->read.resid, mtu);
- skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
+ skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
res->cur_psn, AETH_ACK_UNLIMITED);
if (!skb)
return RESPST_ERR_RNR;
@@ -858,9 +891,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
qp->resp.msn++;
return RESPST_READ_REPLY;
} else if (pkt->mask & RXE_ATOMIC_MASK) {
- err = process_atomic(qp, pkt);
- if (err)
- return err;
+ return RESPST_ATOMIC_REPLY;
} else {
/* Unreachable */
WARN_ON_ONCE(1);
@@ -997,14 +1028,13 @@ finish:
return RESPST_CLEANUP;
}
-static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
- u8 syndrome, u32 psn)
+static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
int err = 0;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
+ skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
0, psn, syndrome);
if (!skb) {
err = -ENOMEM;
@@ -1019,40 +1049,29 @@ err1:
return err;
}
-static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
- u8 syndrome)
+static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
- int rc = 0;
+ int err = 0;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- struct resp_res *res;
- skb = prepare_ack_packet(qp, pkt, &ack_pkt,
- IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
- syndrome);
+ skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
+ 0, psn, syndrome);
if (!skb) {
- rc = -ENOMEM;
+ err = -ENOMEM;
goto out;
}
- res = &qp->resp.resources[qp->resp.res_head];
- free_rd_atomic_resource(qp, res);
- rxe_advance_resp_resource(qp);
-
- skb_get(skb);
- res->type = RXE_ATOMIC_MASK;
- res->atomic.skb = skb;
- res->first_psn = ack_pkt.psn;
- res->last_psn = ack_pkt.psn;
- res->cur_psn = ack_pkt.psn;
+ err = rxe_xmit_packet(qp, &ack_pkt, skb);
+ if (err)
+ pr_err_ratelimited("Failed sending atomic ack\n");
- rc = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (rc) {
- pr_err_ratelimited("Failed sending ack\n");
- rxe_put(qp);
- }
+ /* have to clear this since it is used to trigger
+ * long read replies
+ */
+ qp->resp.res = NULL;
out:
- return rc;
+ return err;
}
static enum resp_states acknowledge(struct rxe_qp *qp,
@@ -1062,11 +1081,11 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
return RESPST_CLEANUP;
if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
- send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
+ send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
else if (pkt->mask & RXE_ATOMIC_MASK)
- send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
+ send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
else if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
+ send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
return RESPST_CLEANUP;
}
@@ -1119,7 +1138,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+ send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
return RESPST_CLEANUP;
} else if (pkt->mask & RXE_READ_MASK) {
struct resp_res *res;
@@ -1173,14 +1192,11 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
/* Find the operation in our list of responder resources. */
res = find_resource(qp, pkt->psn);
if (res) {
- skb_get(res->atomic.skb);
- /* Resend the result. */
- rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
- if (rc) {
- pr_err("Failed resending result. This flow is not handled - skb ignored\n");
- rc = RESPST_CLEANUP;
- goto out;
- }
+ res->replay = 1;
+ res->cur_psn = pkt->psn;
+ qp->resp.res = res;
+ rc = RESPST_ATOMIC_REPLY;
+ goto out;
}
/* Resource not found. Class D error. Drop the request. */
@@ -1260,17 +1276,15 @@ int rxe_responder(void *arg)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
- int ret = 0;
+ int ret;
if (!rxe_get(qp))
return -EAGAIN;
qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
- if (!qp->valid) {
- ret = -EINVAL;
- goto done;
- }
+ if (!qp->valid)
+ goto exit;
switch (qp->resp.state) {
case QP_STATE_RESET:
@@ -1316,6 +1330,9 @@ int rxe_responder(void *arg)
case RESPST_READ_REPLY:
state = read_reply(qp, pkt);
break;
+ case RESPST_ATOMIC_REPLY:
+ state = atomic_reply(qp, pkt);
+ break;
case RESPST_ACKNOWLEDGE:
state = acknowledge(qp, pkt);
break;
@@ -1327,7 +1344,7 @@ int rxe_responder(void *arg)
break;
case RESPST_ERR_PSN_OUT_OF_SEQ:
/* RC only - Class B. Drop packet. */
- send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
+ send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
state = RESPST_CLEANUP;
break;
@@ -1349,7 +1366,7 @@ int rxe_responder(void *arg)
if (qp_type(qp) == IB_QPT_RC) {
rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
/* RC - class B */
- send_ack(qp, pkt, AETH_RNR_NAK |
+ send_ack(qp, AETH_RNR_NAK |
(~AETH_TYPE_MASK &
qp->attr.min_rnr_timer),
pkt->psn);
@@ -1438,7 +1455,7 @@ int rxe_responder(void *arg)
case RESPST_ERROR:
qp->resp.goto_error = 0;
- pr_warn("qp#%d moved to error state\n", qp_num(qp));
+ pr_debug("qp#%d moved to error state\n", qp_num(qp));
rxe_qp_error(qp);
goto exit;
@@ -1447,9 +1464,16 @@ int rxe_responder(void *arg)
}
}
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_responder
+ */
+done:
+ ret = 0;
+ goto out;
exit:
ret = -EAGAIN;
-done:
+out:
rxe_put(qp);
return ret;
}
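The new RESPST_ATOMIC_REPLY state makes atomic operations idempotent across retransmits: the operation executes exactly once under a global spinlock, the original value is cached in res->atomic.orig_val, and a replayed request re-sends the cached result instead of re-executing. A minimal sketch of the execute-once core in standalone form (hypothetical function, same locking discipline):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_atomic_lock);

/* perform an 8-byte compare&swap or fetch&add exactly once and
 * return the original value for the ACK (and for any replays)
 */
static u64 demo_do_atomic(u64 *vaddr, bool is_cswap, u64 cmp, u64 swap_add)
{
	u64 orig;

	spin_lock_bh(&demo_atomic_lock);
	orig = *vaddr;
	if (is_cswap) {
		if (orig == cmp)
			*vaddr = swap_add;
	} else {
		*vaddr = orig + swap_add;
	}
	spin_unlock_bh(&demo_atomic_lock);

	return orig;
}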
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 0c4db5bb17d7..2248cf33d776 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/hardirq.h>
-#include "rxe_task.h"
+#include "rxe.h"
int __rxe_do_task(struct rxe_task *task)
@@ -33,6 +33,7 @@ void rxe_do_task(struct tasklet_struct *t)
int cont;
int ret;
struct rxe_task *task = from_tasklet(task, t, tasklet);
+ unsigned int iterations = RXE_MAX_ITERATIONS;
spin_lock_bh(&task->state_lock);
switch (task->state) {
@@ -61,13 +62,20 @@ void rxe_do_task(struct tasklet_struct *t)
spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_BUSY:
- if (ret)
+ if (ret) {
task->state = TASK_STATE_START;
- else
+ } else if (iterations--) {
cont = 1;
+ } else {
+ /* reschedule the tasklet and exit
+ * the loop to give up the cpu
+ */
+ tasklet_schedule(&task->tasklet);
+ task->state = TASK_STATE_START;
+ }
break;
- /* soneone tried to run the task since the last time we called
+ /* someone tried to run the task since the last time we called
* func, so we will call one more time regardless of the
* return value
*/
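The iterations change is an instance of a general softirq-fairness pattern: bound the number of loop passes (RXE_MAX_ITERATIONS above) and, when the budget is spent, reschedule the tasklet instead of monopolizing the CPU. A generic sketch with hypothetical work helpers:

#include <linux/interrupt.h>

#define DEMO_BUDGET 1024

static bool demo_have_work(void);	/* hypothetical */
static void demo_do_one_unit(void);	/* hypothetical */

static void demo_tasklet_fn(struct tasklet_struct *t)
{
	unsigned int budget = DEMO_BUDGET;

	while (demo_have_work()) {
		demo_do_one_unit();
		if (!budget--) {
			/* budget spent: yield the cpu, run again later */
			tasklet_schedule(t);
			return;
		}
	}
}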
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9d995854a174..e264cf69bf55 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -115,7 +115,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
struct rxe_ucontext *uc = to_ruc(ibuc);
- rxe_put(uc);
+ rxe_cleanup(uc);
}
static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
@@ -149,7 +149,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
- rxe_put(pd);
+ rxe_cleanup(pd);
return 0;
}
@@ -176,7 +176,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
if (err)
return err;
- err = rxe_add_to_pool(&rxe->ah_pool, ah);
+ err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err)
return err;
@@ -188,7 +189,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = copy_to_user(&uresp->ah_num, &ah->ah_num,
sizeof(uresp->ah_num));
if (err) {
- rxe_put(ah);
+ rxe_cleanup(ah);
return -EFAULT;
}
} else if (ah->is_user) {
@@ -197,6 +198,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
}
rxe_init_av(init_attr->ah_attr, &ah->av);
+ rxe_finalize(ah);
+
return 0;
}
@@ -228,7 +231,8 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
- rxe_put(ah);
+ rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+
return 0;
}
@@ -308,12 +312,13 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err)
- goto err_put;
+ goto err_cleanup;
return 0;
-err_put:
- rxe_put(srq);
+err_cleanup:
+ rxe_cleanup(srq);
+
return err;
}
@@ -362,7 +367,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);
- rxe_put(srq);
+ rxe_cleanup(srq);
return 0;
}
@@ -429,10 +434,11 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (err)
goto qp_init;
+ rxe_finalize(qp);
return 0;
qp_init:
- rxe_put(qp);
+ rxe_cleanup(qp);
return err;
}
@@ -485,7 +491,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (ret)
return ret;
- rxe_put(qp);
+ rxe_cleanup(qp);
return 0;
}
@@ -803,7 +809,7 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
rxe_cq_disable(cq);
- rxe_put(cq);
+ rxe_cleanup(cq);
return 0;
}
@@ -898,6 +904,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_get(pd);
rxe_mr_init_dma(pd, access, mr);
+ rxe_finalize(mr);
return &mr->ibmr;
}
@@ -926,11 +933,13 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
if (err)
goto err3;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err3:
rxe_put(pd);
- rxe_put(mr);
+ rxe_cleanup(mr);
err2:
return ERR_PTR(err);
}
@@ -958,35 +967,52 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (err)
goto err2;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err2:
rxe_put(pd);
- rxe_put(mr);
+ rxe_cleanup(mr);
err1:
return ERR_PTR(err);
}
-/* build next_map_set from scatterlist
- * The IB_WR_REG_MR WR will swap map_sets
- */
+static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_map *map;
+ struct rxe_phys_buf *buf;
+
+ if (unlikely(mr->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
+ buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+
+ buf->addr = addr;
+ buf->size = ibmr->page_size;
+ mr->nbuf++;
+
+ return 0;
+}
+
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map_set *set = mr->next_map_set;
int n;
- set->nbuf = 0;
+ mr->nbuf = 0;
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page);
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
- set->va = ibmr->iova;
- set->iova = ibmr->iova;
- set->length = ibmr->length;
- set->page_shift = ilog2(ibmr->page_size);
- set->page_mask = ibmr->page_size - 1;
- set->offset = set->iova & set->page_mask;
+ mr->va = ibmr->iova;
+ mr->iova = ibmr->iova;
+ mr->length = ibmr->length;
+ mr->page_shift = ilog2(ibmr->page_size);
+ mr->page_mask = ibmr->page_size - 1;
+ mr->offset = mr->iova & mr->page_mask;
return n;
}
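rxe_map_mr_sg() now follows the standard ib_sg_to_pages() contract directly: the driver supplies a set_page callback that records one page-aligned address per invocation while the core walks the scatterlist. A sketch of that contract with a hypothetical driver MR — the rxe version above stores into its two-level map instead of a flat array:

#include <rdma/ib_verbs.h>

struct demo_mr {			/* hypothetical driver MR */
	struct ib_mr ibmr;
	u64 *pages;
	u32 num_buf;
	u32 nbuf;
};

#define to_demo_mr(m) container_of(m, struct demo_mr, ibmr)

/* called once per page by ib_sg_to_pages() */
static int demo_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct demo_mr *mr = to_demo_mr(ibmr);

	if (mr->nbuf == mr->num_buf)
		return -ENOMEM;		/* page table full */
	mr->pages[mr->nbuf++] = addr;
	return 0;
}

static int demo_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents, unsigned int *sg_offset)
{
	struct demo_mr *mr = to_demo_mr(ibmr);

	mr->nbuf = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, demo_set_page);
}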
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ac464e68c923..96af3e054f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -9,7 +9,6 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"
@@ -124,11 +123,13 @@ struct rxe_req_info {
int need_rd_atomic;
int wait_psn;
int need_retry;
+ int wait_for_rnr_timer;
int noack_pkts;
struct rxe_task task;
};
struct rxe_comp_info {
+ enum rxe_qp_state state;
u32 psn;
int opcode;
int timeout;
@@ -155,7 +156,7 @@ struct resp_res {
union {
struct {
- struct sk_buff *skb;
+ u64 orig_val;
} atomic;
struct {
u64 va_org;
@@ -189,7 +190,6 @@ struct rxe_resp_info {
u32 resid;
u32 rkey;
u32 length;
- u64 atomic_orig;
/* SRQ only */
struct {
@@ -288,17 +288,6 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
-struct rxe_map_set {
- struct rxe_map **map;
- u64 va;
- u64 iova;
- size_t length;
- u32 offset;
- u32 nbuf;
- int page_shift;
- int page_mask;
-};
-
static inline int rkey_is_mw(u32 rkey)
{
u32 index = rkey >> 8;
@@ -316,20 +305,26 @@ struct rxe_mr {
u32 rkey;
enum rxe_mr_state state;
enum ib_mr_type type;
+ u64 va;
+ u64 iova;
+ size_t length;
+ u32 offset;
int access;
+ int page_shift;
+ int page_mask;
int map_shift;
int map_mask;
u32 num_buf;
+ u32 nbuf;
u32 max_buf;
u32 num_map;
atomic_t num_mw;
- struct rxe_map_set *cur_map_set;
- struct rxe_map_set *next_map_set;
+ struct rxe_map **map;
};
enum rxe_mw_state {
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 17f34d584cd9..f88d2971c2c6 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -725,11 +725,11 @@ static int siw_proc_mpareply(struct siw_cep *cep)
enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;
rv = siw_recv_mpa_rr(cep);
- if (rv != -EAGAIN)
- siw_cancel_mpatimer(cep);
if (rv)
goto out_err;
+ siw_cancel_mpatimer(cep);
+
rep = &cep->mpa.hdr;
if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
@@ -895,7 +895,8 @@ static int siw_proc_mpareply(struct siw_cep *cep)
}
out_err:
- siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+ if (rv != -EAGAIN)
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
return rv;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 09316072b789..8dedae7ae79e 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1167,7 +1167,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
err_out:
siw_dbg(base_cq->device, "CQ creation failed: %d", rv);
- if (cq && cq->queue) {
+ if (cq->queue) {
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f7995519bbc8..ed25061fac62 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1109,7 +1109,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
* if he sets the device address back to be based on GID index 0,
* he no longer wishs to control it.
*
- * If the user doesn't control the the device address,
+ * If the user doesn't control the device address,
* IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means
* the port GUID has changed and GID at index 0 has changed
* so we need to change priv->local_gid and priv->dev->dev_addr
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2a8961b685c2..a4904371e2db 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1664,8 +1664,10 @@ static void ipoib_napi_add(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
- netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
+ netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
+ IPOIB_NUM_WC);
+ netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
+ MAX_SEND_CQE);
}
static void ipoib_napi_del(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 321949a570ed..620ae5b2d80d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -568,7 +568,7 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -685,7 +685,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
return cls_session;
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index bd5f3b5e1727..7b83f48f60c5 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iscsi_hdr *hdr;
char *data;
int length;
+ bool full_feature_phase;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
hdr = desc->rsp + sizeof(struct iser_ctrl);
data = desc->rsp + ISER_HEADERS_LEN;
length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- if (iser_conn->iscsi_conn->session->discovery_sess)
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
return;
/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c08f2d9133b6..a00ca117303a 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -246,6 +246,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
device = ib_conn->device;
ib_dev = device->ib_device;
+ /* +1 for drain */
if (ib_conn->pi_support)
max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
else
@@ -267,7 +268,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
init_attr.qp_context = (void *)ib_conn;
init_attr.send_cq = ib_conn->cq;
init_attr.recv_cq = ib_conn->cq;
- init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -485,7 +487,7 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
iser_conn, err);
/* block until all flush errors are consumed */
- ib_drain_sq(ib_conn->qp);
+ ib_drain_qp(ib_conn->qp);
}
return 1;
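The "+1 for drain" comments capture a documented requirement of the drain helpers: ib_drain_qp() posts one marker work request per queue and waits for its completion, so a QP that will be drained must reserve one extra slot in both max_send_wr and max_recv_wr (and room in the CQ). A sizing sketch with placeholder depths:

#include <rdma/ib_verbs.h>

/* sketch only: the depth of 128 is a placeholder */
static struct ib_qp *demo_create_drainable_qp(struct ib_pd *pd,
					      struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {};

	attr.send_cq = cq;
	attr.recv_cq = cq;
	attr.qp_type = IB_QPT_RC;
	attr.cap.max_send_wr = 128 + 1;	/* +1 slot for the drain WR */
	attr.cap.max_recv_wr = 128 + 1;	/* +1 slot for the drain WR */
	attr.cap.max_send_sge = 1;
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;

	return ib_create_qp(pd, &attr);
}

/* later: ib_drain_qp(qp) flushes both queues; switching from
 * ib_drain_sq() above means the receive queue is drained too
 */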
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 385a19846c24..1e6ffafa2db3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -32,11 +32,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
- struct rtrs_clt_stats_pcpu *s;
-
- s = get_cpu_ptr(stats->pcpu_stats);
- s->rdma.failover_cnt++;
- put_cpu_ptr(stats->pcpu_stats);
+ this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
}
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,12 +165,8 @@ int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
size_t size, int d)
{
- struct rtrs_clt_stats_pcpu *s;
-
- s = get_cpu_ptr(stats->pcpu_stats);
- s->rdma.dir[d].cnt++;
- s->rdma.dir[d].size_total += size;
- put_cpu_ptr(stats->pcpu_stats);
+ this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
+ this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
}
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 9809c3883979..baecde41d126 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -740,25 +740,25 @@ struct path_it {
struct rtrs_clt_path *(*next_path)(struct path_it *it);
};
-/**
- * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
+/*
+ * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
* @head: the head for the list.
- * @ptr: the list head to take the next element from.
- * @type: the type of the struct this is embedded in.
- * @memb: the name of the list_head within the struct.
+ * @clt_path: The element to take the next clt_path from.
*
- * Next element returned in round-robin fashion, i.e. head will be skipped,
+ * Next clt path returned in round-robin fashion, i.e. head will be skipped,
* but if list is observed as empty, NULL will be returned.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
+ * This function may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
-#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
-({ \
- list_next_or_null_rcu(head, ptr, type, memb) ?: \
- list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
- type, memb); \
-})
+static inline struct rtrs_clt_path *
+rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
+{
+ return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
+ list_next_or_null_rcu(head,
+ READ_ONCE((&clt_path->s.entry)->next),
+ typeof(*clt_path), s.entry);
+}
/**
* get_next_path_rr() - Returns path in round-robin fashion.
@@ -789,10 +789,8 @@ static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
path = list_first_or_null_rcu(&clt->paths_list,
typeof(*path), s.entry);
else
- path = list_next_or_null_rr_rcu(&clt->paths_list,
- &path->s.entry,
- typeof(*path),
- s.entry);
+ path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
+
rcu_assign_pointer(*ppcpu_path, path);
return path;
@@ -1403,8 +1401,7 @@ static int alloc_permits(struct rtrs_clt_sess *clt)
unsigned int chunk_bits;
int err, i;
- clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
- sizeof(long), GFP_KERNEL);
+ clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
if (!clt->permits_map) {
err = -ENOMEM;
goto out_err;
@@ -1426,7 +1423,7 @@ static int alloc_permits(struct rtrs_clt_sess *clt)
return 0;
err_map:
- kfree(clt->permits_map);
+ bitmap_free(clt->permits_map);
clt->permits_map = NULL;
out_err:
return err;
@@ -1434,13 +1431,11 @@ out_err:
static void free_permits(struct rtrs_clt_sess *clt)
{
- if (clt->permits_map) {
- size_t sz = clt->queue_depth;
-
+ if (clt->permits_map)
wait_event(clt->permits_wait,
- find_first_bit(clt->permits_map, sz) >= sz);
- }
- kfree(clt->permits_map);
+ bitmap_empty(clt->permits_map, clt->queue_depth));
+
+ bitmap_free(clt->permits_map);
clt->permits_map = NULL;
kfree(clt->permits);
clt->permits = NULL;
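The permits conversion replaces open-coded kcalloc()/find_first_bit() with the bitmap API, and the "all permits returned" test becomes bitmap_empty(). A minimal sketch of the same pair of helpers:

#include <linux/bitmap.h>

static unsigned long *demo_permits;

static int demo_permits_init(unsigned int depth)
{
	demo_permits = bitmap_zalloc(depth, GFP_KERNEL);
	return demo_permits ? 0 : -ENOMEM;
}

static void demo_permits_destroy(unsigned int depth)
{
	WARN_ON(!bitmap_empty(demo_permits, depth));	/* all returned? */
	bitmap_free(demo_permits);
	demo_permits = NULL;
}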
@@ -2277,8 +2272,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
* removed. If @sess is the last element, then @next is NULL.
*/
rcu_read_lock();
- next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
- typeof(*next), s.entry);
+ next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
rcu_read_unlock();
/*
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 9a1e5c2ae55c..ac0df734eba8 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -23,6 +23,17 @@
#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
__stringify(RTRS_PROTO_VER_MINOR)
+/*
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65536 (2^16) in theory.
+ * But mempool_create, create_qp and ib_post_send fail with
+ * "cannot allocate memory" error if sess_queue_depth is too big.
+ * Therefore the practical max value of sess_queue_depth is
+ * somewhere between 1 and 65534 and it depends on the system.
+ */
+#define MAX_SESS_QUEUE_DEPTH 65535
+
enum rtrs_imm_const {
MAX_IMM_TYPE_BITS = 4,
MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
@@ -46,16 +57,6 @@ enum {
MAX_PATHS_NUM = 128,
- /*
- * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
- * and the minimum chunk size is 4096 (2^12).
- * So the maximum sess_queue_depth is 65536 (2^16) in theory.
- * But mempool_create, create_qp and ib_post_send fail with
- * "cannot allocate memory" error if sess_queue_depth is too big.
- * Therefore the pratical max value of sess_queue_depth is
- * somewhere between 1 and 65534 and it depends on the system.
- */
- MAX_SESS_QUEUE_DEPTH = 65535,
MIN_CHUNK_SIZE = 8192,
RTRS_HB_INTERVAL_MS = 5000,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
index 44b1c1652131..2aff1213a19d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -14,9 +14,14 @@
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
{
if (enable) {
- struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+ memset(r, 0, sizeof(*r));
+ }
- memset(r, 0, sizeof(*r));
return 0;
}
@@ -25,11 +30,22 @@ int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
{
- struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats sum;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ }
- return sysfs_emit(page, "%lld %lld %lld %lldn %u\n",
- (s64)atomic64_read(&r->dir[READ].cnt),
- (s64)atomic64_read(&r->dir[READ].size_total),
- (s64)atomic64_read(&r->dir[WRITE].cnt),
- (s64)atomic64_read(&r->dir[WRITE].size_total), 0);
+ return sysfs_emit(page, "%llu %llu %llu %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total);
}
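This conversion trades atomic64 counters for per-cpu u64s: writers use this_cpu_inc()/this_cpu_add() with no locking or cross-cpu cache traffic, and the rare reader sums across for_each_possible_cpu(). A self-contained sketch of the pattern (names hypothetical; allocate with alloc_percpu() at init, release with free_percpu() at teardown, as the surrounding hunks do):

#include <linux/percpu.h>

struct demo_stats {
	u64 cnt;
	u64 bytes;
};

static struct demo_stats __percpu *demo_stats;

static void demo_account(size_t size)
{
	/* writer: lock-free, stays on the local cpu's copy */
	this_cpu_inc(demo_stats->cnt);
	this_cpu_add(demo_stats->bytes, size);
}

static void demo_sum(u64 *cnt, u64 *bytes)
{
	int cpu;

	*cnt = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		struct demo_stats *s = per_cpu_ptr(demo_stats, cpu);

		*cnt += s->cnt;
		*bytes += s->bytes;
	}
}

The sum is momentarily stale while writers run, which is an acceptable trade for statistics — the same trade this patch makes.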
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index b94ae12c2795..2a3c9ac64a42 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -220,6 +220,8 @@ static void rtrs_srv_path_stats_release(struct kobject *kobj)
stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+ free_percpu(stats->rdma_stats);
+
kfree(stats);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 24024bce2566..34c03bde5064 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -11,7 +11,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
-#include <linux/mempool.h>
#include "rtrs-srv.h"
#include "rtrs-log.h"
@@ -26,11 +25,7 @@ MODULE_LICENSE("GPL");
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE
-/* We guarantee to serve 10 paths at least */
-#define CHUNK_POOL_SZ 10
-
static struct rtrs_rdma_dev_pd dev_pd;
-static mempool_t *chunk_pool;
struct class *rtrs_dev_class;
static struct rtrs_srv_ib_ctx ib_ctx;
@@ -1358,7 +1353,7 @@ static void free_srv(struct rtrs_srv_sess *srv)
WARN_ON(refcount_read(&srv->refcount));
for (i = 0; i < srv->queue_depth; i++)
- mempool_free(srv->chunks[i], chunk_pool);
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
mutex_destroy(&srv->paths_mutex);
mutex_destroy(&srv->paths_ev_mutex);
@@ -1411,7 +1406,8 @@ static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
goto err_free_srv;
for (i = 0; i < srv->queue_depth; i++) {
- srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
+ srv->chunks[i] = alloc_pages(GFP_KERNEL,
+ get_order(max_chunk_size));
if (!srv->chunks[i])
goto err_free_chunks;
}
@@ -1424,7 +1420,7 @@ static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
err_free_chunks:
while (i--)
- mempool_free(srv->chunks[i], chunk_pool);
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
err_free_srv:
@@ -1513,6 +1509,7 @@ static void free_path(struct rtrs_srv_path *srv_path)
kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
} else {
+ free_percpu(srv_path->stats->rdma_stats);
kfree(srv_path->stats);
kfree(srv_path);
}
@@ -1755,13 +1752,17 @@ static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
if (!srv_path->stats)
goto err_free_sess;
+ srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
+ if (!srv_path->stats->rdma_stats)
+ goto err_free_stats;
+
srv_path->stats->srv_path = srv_path;
srv_path->dma_addr = kcalloc(srv->queue_depth,
sizeof(*srv_path->dma_addr),
GFP_KERNEL);
if (!srv_path->dma_addr)
- goto err_free_stats;
+ goto err_free_percpu;
srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
GFP_KERNEL);
@@ -1813,6 +1814,8 @@ err_free_con:
kfree(srv_path->s.con);
err_free_dma_addr:
kfree(srv_path->dma_addr);
+err_free_percpu:
+ free_percpu(srv_path->stats->rdma_stats);
err_free_stats:
kfree(srv_path->stats);
err_free_sess:
@@ -2266,14 +2269,10 @@ static int __init rtrs_server_init(void)
err);
return err;
}
- chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
- get_order(max_chunk_size));
- if (!chunk_pool)
- return -ENOMEM;
rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
if (IS_ERR(rtrs_dev_class)) {
err = PTR_ERR(rtrs_dev_class);
- goto out_chunk_pool;
+ goto out_err;
}
rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
if (!rtrs_wq) {
@@ -2285,9 +2284,7 @@ static int __init rtrs_server_init(void)
out_dev_class:
class_destroy(rtrs_dev_class);
-out_chunk_pool:
- mempool_destroy(chunk_pool);
-
+out_err:
return err;
}
@@ -2295,7 +2292,6 @@ static void __exit rtrs_server_exit(void)
{
destroy_workqueue(rtrs_wq);
class_destroy(rtrs_dev_class);
- mempool_destroy(chunk_pool);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
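With the shared mempool gone, every session allocates its chunks directly; get_order() converts a byte count to the page order that alloc_pages()/__free_pages() expect. A sketch of the allocate/free pair:

#include <linux/gfp.h>
#include <linux/mm.h>

/* assumption: chunk_size is at least PAGE_SIZE */
static struct page *demo_chunk_alloc(size_t chunk_size)
{
	return alloc_pages(GFP_KERNEL, get_order(chunk_size));
}

static void demo_chunk_free(struct page *pg, size_t chunk_size)
{
	__free_pages(pg, get_order(chunk_size));
}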
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 6292e87f6afd..186a63c217df 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/refcount.h>
+#include <linux/percpu.h>
#include "rtrs-pri.h"
/*
@@ -29,15 +30,15 @@ enum rtrs_srv_state {
*/
struct rtrs_srv_stats_rdma_stats {
struct {
- atomic64_t cnt;
- atomic64_t size_total;
+ u64 cnt;
+ u64 size_total;
} dir[2];
};
struct rtrs_srv_stats {
- struct kobject kobj_stats;
- struct rtrs_srv_stats_rdma_stats rdma_stats;
- struct rtrs_srv_path *srv_path;
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats __percpu *rdma_stats;
+ struct rtrs_srv_path *srv_path;
};
struct rtrs_srv_con {
@@ -130,8 +131,8 @@ void close_path(struct rtrs_srv_path *srv_path);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
{
- atomic64_inc(&s->rdma_stats.dir[d].cnt);
- atomic64_add(size, &s->rdma_stats.dir[d].size_total);
+ this_cpu_inc(s->rdma_stats->dir[d].cnt);
+ this_cpu_add(s->rdma_stats->dir[d].size_total, size);
}
/* functions which are implemented in rtrs-srv-stats.c */
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f86ee1c4b970..21cbe30d526f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -565,12 +565,9 @@ static int srpt_refresh_port(struct srpt_port *sport)
if (ret)
return ret;
- sport->port_guid_id.wwn.priv = sport;
- srpt_format_guid(sport->port_guid_id.name,
- sizeof(sport->port_guid_id.name),
+ srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
&sport->gid.global.interface_id);
- sport->port_gid_id.wwn.priv = sport;
- snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
+ snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
@@ -2221,13 +2218,13 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
ch->sport = sport;
- if (ib_cm_id) {
- ch->ib_cm.cm_id = ib_cm_id;
- ib_cm_id->context = ch;
- } else {
+ if (rdma_cm_id) {
ch->using_rdma_cm = true;
ch->rdma_cm.cm_id = rdma_cm_id;
rdma_cm_id->context = ch;
+ } else {
+ ch->ib_cm.cm_id = ib_cm_id;
+ ib_cm_id->context = ch;
}
/*
* ch->rq_size should be at least as large as the initiator queue
@@ -2314,31 +2311,35 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- mutex_lock(&sport->port_guid_id.mutex);
- list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->guid_id) {
+ mutex_lock(&sport->guid_id->mutex);
+ list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
+ }
+ mutex_unlock(&sport->guid_id->mutex);
}
- mutex_unlock(&sport->port_guid_id.mutex);
- mutex_lock(&sport->port_gid_id.mutex);
- list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->gid_id) {
+ mutex_lock(&sport->gid_id->mutex);
+ list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- /* Retry without leading "0x" */
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->gid_id->mutex);
}
- mutex_unlock(&sport->port_gid_id.mutex);
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
@@ -2983,7 +2984,12 @@ static int srpt_release_sport(struct srpt_port *sport)
return 0;
}
-static struct se_wwn *__srpt_lookup_wwn(const char *name)
+struct port_and_port_id {
+ struct srpt_port *sport;
+ struct srpt_port_id **port_id;
+};
+
+static struct port_and_port_id __srpt_lookup_port(const char *name)
{
struct ib_device *dev;
struct srpt_device *sdev;
@@ -2998,25 +3004,38 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name)
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid_id.name, name) == 0)
- return &sport->port_guid_id.wwn;
- if (strcmp(sport->port_gid_id.name, name) == 0)
- return &sport->port_gid_id.wwn;
+ if (strcmp(sport->guid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->guid_id};
+ }
+ if (strcmp(sport->gid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->gid_id};
+ }
}
}
- return NULL;
+ return (struct port_and_port_id){};
}
-static struct se_wwn *srpt_lookup_wwn(const char *name)
+/**
+ * srpt_lookup_port() - Look up an RDMA port by name
+ * @name: ASCII port name
+ *
+ * Increments the RDMA device reference count if an RDMA port pointer is
+ * returned. The caller must drop that reference count by calling srpt_sdev_put().
+ */
+static struct port_and_port_id srpt_lookup_port(const char *name)
{
- struct se_wwn *wwn;
+ struct port_and_port_id papi;
spin_lock(&srpt_dev_lock);
- wwn = __srpt_lookup_wwn(name);
+ papi = __srpt_lookup_port(name);
spin_unlock(&srpt_dev_lock);
- return wwn;
+ return papi;
}
static void srpt_free_srq(struct srpt_device *sdev)
@@ -3101,6 +3120,18 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
return ret;
}
+static void srpt_free_sdev(struct kref *refcnt)
+{
+ struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
+
+ kfree(sdev);
+}
+
+static void srpt_sdev_put(struct srpt_device *sdev)
+{
+ kref_put(&sdev->refcnt, srpt_free_sdev);
+}
+
/**
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
@@ -3119,6 +3150,7 @@ static int srpt_add_one(struct ib_device *device)
if (!sdev)
return -ENOMEM;
+ kref_init(&sdev->refcnt);
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
@@ -3182,10 +3214,6 @@ static int srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
- mutex_init(&sport->port_guid_id.mutex);
- INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
- mutex_init(&sport->port_gid_id.mutex);
- INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
ret = srpt_refresh_port(sport);
if (ret) {
@@ -3214,7 +3242,7 @@ err_ring:
srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
free_dev:
- kfree(sdev);
+ srpt_sdev_put(sdev);
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
return ret;
}
@@ -3258,7 +3286,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
ib_dealloc_pd(sdev->pd);
- kfree(sdev);
+ srpt_sdev_put(sdev);
}
static struct ib_client srpt_client = {
@@ -3286,10 +3314,10 @@ static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
{
struct srpt_port *sport = wwn->priv;
- if (wwn == &sport->port_guid_id.wwn)
- return &sport->port_guid_id;
- if (wwn == &sport->port_gid_id.wwn)
- return &sport->port_gid_id;
+ if (sport->guid_id && &sport->guid_id->wwn == wwn)
+ return sport->guid_id;
+ if (sport->gid_id && &sport->gid_id->wwn == wwn)
+ return sport->gid_id;
WARN_ON_ONCE(true);
return NULL;
}
@@ -3774,7 +3802,31 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
- return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
+ struct port_and_port_id papi = srpt_lookup_port(name);
+ struct srpt_port *sport = papi.sport;
+ struct srpt_port_id *port_id;
+
+ if (!papi.port_id)
+ return ERR_PTR(-EINVAL);
+ if (*papi.port_id) {
+ /* Attempt to create a directory that already exists. */
+ WARN_ON_ONCE(true);
+ return &(*papi.port_id)->wwn;
+ }
+ port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
+ if (!port_id) {
+ srpt_sdev_put(sport->sdev);
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&port_id->mutex);
+ INIT_LIST_HEAD(&port_id->tpg_list);
+ port_id->wwn.priv = sport;
+ memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
+ sport->gid_name, ARRAY_SIZE(port_id->name));
+
+ *papi.port_id = port_id;
+
+ return &port_id->wwn;
}
/**
@@ -3783,6 +3835,18 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
+ struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
+ struct srpt_port *sport = wwn->priv;
+
+ if (sport->guid_id == port_id)
+ sport->guid_id = NULL;
+ else if (sport->gid_id == port_id)
+ sport->gid_id = NULL;
+ else
+ WARN_ON_ONCE(true);
+
+ srpt_sdev_put(sport->sdev);
+ kfree(port_id);
}
static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
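The switch from a bare kfree() to srpt_sdev_put() above is the classic kref pattern: srpt_add_one() holds the initial reference, a successful lookup takes an extra one, and whoever drops the count to zero frees the object through container_of(). A minimal userspace sketch of that pattern, using C11 atomics as a stand-in for the kernel's kref (all names here are hypothetical, not the srpt code itself):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct kref { atomic_int refcount; };

    static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
    static void kref_get(struct kref *k) { atomic_fetch_add(&k->refcount, 1); }
    static void kref_put(struct kref *k, void (*release)(struct kref *))
    {
            /* fetch_sub returns the old value; 1 means we dropped it to 0 */
            if (atomic_fetch_sub(&k->refcount, 1) == 1)
                    release(k);
    }

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_dev {                 /* hypothetical srpt_device stand-in */
            struct kref refcnt;
            int id;
    };

    static void demo_free(struct kref *kref)
    {
            struct demo_dev *d = container_of(kref, struct demo_dev, refcnt);

            printf("freeing dev %d\n", d->id);
            free(d);
    }

    int main(void)
    {
            struct demo_dev *d = calloc(1, sizeof(*d));

            kref_init(&d->refcnt);           /* owner ref, as in srpt_add_one() */
            kref_get(&d->refcnt);            /* lookup ref, as in __srpt_lookup_port() */
            kref_put(&d->refcnt, demo_free); /* srpt_remove_one() drops the owner ref */
            kref_put(&d->refcnt, demo_free); /* last user frees, as in srpt_drop_tport() */
            return 0;
    }

The point of routing the free through kref_put() is that srpt_remove_one() can no longer free the device out from under a configfs directory that still holds a reference.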
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 76e66f630c17..4c46b301eea1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -376,7 +376,7 @@ struct srpt_tpg {
};
/**
- * struct srpt_port_id - information about an RDMA port name
+ * struct srpt_port_id - LIO RDMA port information
* @mutex: Protects @tpg_list changes.
* @tpg_list: TPGs associated with the RDMA port name.
* @wwn: WWN associated with the RDMA port name.
@@ -393,7 +393,7 @@ struct srpt_port_id {
};
/**
- * struct srpt_port - information associated by SRPT with a single IB port
+ * struct srpt_port - SRPT RDMA port information
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -402,8 +402,10 @@ struct srpt_port_id {
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_id: target port GUID
- * @port_gid_id: target port GID
+ * @guid_name: port name in GUID format.
+ * @guid_id: LIO target port information for the port name in GUID format.
+ * @gid_name: port name in GID format.
+ * @gid_id: LIO target port information for the port name in GID format.
* @port_attrib: Port attributes that can be accessed through configfs.
* @refcount: Number of objects associated with this port.
* @freed_channels: Completion that will be signaled once @refcount becomes 0.
@@ -419,8 +421,10 @@ struct srpt_port {
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct srpt_port_id port_guid_id;
- struct srpt_port_id port_gid_id;
+ char guid_name[64];
+ struct srpt_port_id *guid_id;
+ char gid_name[64];
+ struct srpt_port_id *gid_id;
struct srpt_port_attrib port_attrib;
atomic_t refcount;
struct completion *freed_channels;
@@ -430,6 +434,7 @@ struct srpt_port {
/**
* struct srpt_device - information associated by SRPT with a single HCA
+ * @refcnt: Reference count for this device.
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
@@ -445,6 +450,7 @@ struct srpt_port {
* @port: Information about the ports owned by this HCA.
*/
struct srpt_device {
+ struct kref refcnt;
struct ib_device *device;
struct ib_pd *pd;
u32 lkey;
diff --git a/drivers/input/input-core-private.h b/drivers/input/input-core-private.h
new file mode 100644
index 000000000000..116834cf8868
--- /dev/null
+++ b/drivers/input/input-core-private.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _INPUT_CORE_PRIVATE_H
+#define _INPUT_CORE_PRIVATE_H
+
+/*
+ * Functions and definitions that are private to the input core and
+ * should not be used by input drivers or handlers.
+ */
+
+struct input_dev;
+
+void input_mt_release_slots(struct input_dev *dev);
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value);
+
+#endif /* _INPUT_CORE_PRIVATE_H */
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 44fe6f2f063c..14b53dac1253 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -8,6 +8,7 @@
#include <linux/input/mt.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include "input-core-private.h"
#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
@@ -259,10 +260,13 @@ static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt)
{
int i;
+ lockdep_assert_held(&dev->event_lock);
+
for (i = 0; i < mt->num_slots; i++) {
- if (!input_mt_is_used(mt, &mt->slots[i])) {
- input_mt_slot(dev, i);
- input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+ if (input_mt_is_active(&mt->slots[i]) &&
+ !input_mt_is_used(mt, &mt->slots[i])) {
+ input_handle_event(dev, EV_ABS, ABS_MT_SLOT, i);
+ input_handle_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
}
}
}
@@ -278,13 +282,44 @@ void input_mt_drop_unused(struct input_dev *dev)
struct input_mt *mt = dev->mt;
if (mt) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
__input_mt_drop_unused(dev, mt);
mt->frame++;
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_mt_drop_unused);
/**
+ * input_mt_release_slots() - Deactivate all slots
+ * @dev: input device with allocated MT slots
+ *
+ * Lift all active slots.
+ */
+void input_mt_release_slots(struct input_dev *dev)
+{
+ struct input_mt *mt = dev->mt;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ if (mt) {
+ /* This will effectively mark all slots unused. */
+ mt->frame++;
+
+ __input_mt_drop_unused(dev, mt);
+
+ if (test_bit(ABS_PRESSURE, dev->absbit))
+ input_handle_event(dev, EV_ABS, ABS_PRESSURE, 0);
+
+ mt->frame++;
+ }
+}
+
+/**
* input_mt_sync_frame() - synchronize mt frame
* @dev: input device with allocated MT slots
*
@@ -300,8 +335,13 @@ void input_mt_sync_frame(struct input_dev *dev)
if (!mt)
return;
- if (mt->flags & INPUT_MT_DROP_UNUSED)
+ if (mt->flags & INPUT_MT_DROP_UNUSED) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
__input_mt_drop_unused(dev, mt);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
use_count = true;
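Both hunks above apply the same split: __input_mt_drop_unused() only asserts that the lock is held, while its callers take dev->event_lock around it. A userspace sketch of that caller-holds-the-lock contract, with a debug shadow flag standing in for lockdep_assert_held() (the pthread calls are real, everything else is hypothetical):

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool event_lock_held;    /* debug shadow of lock state */

    /* Poor man's lockdep_assert_held(): verify callers honored the
     * locking contract instead of taking the lock here ourselves. */
    static void assert_event_lock_held(void)
    {
            assert(event_lock_held);
    }

    /* __drop_unused-style helper: touches shared state, takes no lock. */
    static void __drop_unused_slots(void)
    {
            assert_event_lock_held();
            /* ... mutate per-slot state here ... */
    }

    /* Public entry point: owns the locking, like input_mt_drop_unused(). */
    static void drop_unused_slots(void)
    {
            pthread_mutex_lock(&event_lock);
            event_lock_held = true;

            __drop_unused_slots();

            event_lock_held = false;
            pthread_mutex_unlock(&event_lock);
    }

    int main(void)
    {
            drop_unused_slots();
            puts("ok");
            return 0;
    }

Keeping the helper lock-free is what lets input_mt_release_slots() call it from contexts that already hold dev->event_lock without deadlocking.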
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1365c9dfb5f2..ebb2b7f0f8ff 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
+#include "input-core-private.h"
#include "input-poller.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
@@ -142,6 +143,8 @@ static void input_pass_values(struct input_dev *dev,
struct input_handle *handle;
struct input_value *v;
+ lockdep_assert_held(&dev->event_lock);
+
if (!count)
return;
@@ -174,44 +177,6 @@ static void input_pass_values(struct input_dev *dev,
}
}
-static void input_pass_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
-{
- struct input_value vals[] = { { type, code, value } };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-}
-
-/*
- * Generate software autorepeat event. Note that we take
- * dev->event_lock here to avoid racing with input_event
- * which may cause keys get "stuck".
- */
-static void input_repeat_key(struct timer_list *t)
-{
- struct input_dev *dev = from_timer(dev, t, timer);
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (test_bit(dev->repeat_key, dev->key) &&
- is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
- struct input_value vals[] = {
- { EV_KEY, dev->repeat_key, 2 },
- input_value_sync
- };
-
- input_set_timestamp(dev, ktime_get());
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-
- if (dev->rep[REP_PERIOD])
- mod_timer(&dev->timer, jiffies +
- msecs_to_jiffies(dev->rep[REP_PERIOD]));
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
#define INPUT_IGNORE_EVENT 0
#define INPUT_PASS_TO_HANDLERS 1
#define INPUT_PASS_TO_DEVICE 2
@@ -275,6 +240,10 @@ static int input_get_disposition(struct input_dev *dev,
int disposition = INPUT_IGNORE_EVENT;
int value = *pval;
+ /* filter-out events from inhibited devices */
+ if (dev->inhibited)
+ return INPUT_IGNORE_EVENT;
+
switch (type) {
case EV_SYN:
@@ -375,19 +344,9 @@ static int input_get_disposition(struct input_dev *dev,
return disposition;
}
-static void input_handle_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+static void input_event_dispose(struct input_dev *dev, int disposition,
+ unsigned int type, unsigned int code, int value)
{
- int disposition;
-
- /* filter-out events from inhibited devices */
- if (dev->inhibited)
- return;
-
- disposition = input_get_disposition(dev, type, code, &value);
- if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
- add_input_randomness(type, code, value);
-
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
@@ -426,7 +385,22 @@ static void input_handle_event(struct input_dev *dev,
input_pass_values(dev, dev->vals, dev->num_vals);
dev->num_vals = 0;
}
+}
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
+{
+ int disposition;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ disposition = input_get_disposition(dev, type, code, &value);
+ if (disposition != INPUT_IGNORE_EVENT) {
+ if (type != EV_SYN)
+ add_input_randomness(type, code, value);
+
+ input_event_dispose(dev, disposition, type, code, value);
+ }
}
/**
@@ -613,7 +587,7 @@ static void __input_release_device(struct input_handle *handle)
lockdep_is_held(&dev->mutex));
if (grabber == handle) {
rcu_assign_pointer(dev->grab, NULL);
- /* Make sure input_pass_event() notices that grab is gone */
+ /* Make sure input_pass_values() notices that grab is gone */
synchronize_rcu();
list_for_each_entry(handle, &dev->h_list, d_node)
@@ -736,7 +710,7 @@ void input_close_device(struct input_handle *handle)
if (!--handle->open) {
/*
- * synchronize_rcu() makes sure that input_pass_event()
+ * synchronize_rcu() makes sure that input_pass_values()
* completed and that no more input events are delivered
* through this handle
*/
@@ -751,22 +725,21 @@ EXPORT_SYMBOL(input_close_device);
* Simulate keyup events for all keys that are marked as pressed.
* The function must be called with dev->event_lock held.
*/
-static void input_dev_release_keys(struct input_dev *dev)
+static bool input_dev_release_keys(struct input_dev *dev)
{
bool need_sync = false;
int code;
+ lockdep_assert_held(&dev->event_lock);
+
if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
for_each_set_bit(code, dev->key, KEY_CNT) {
- input_pass_event(dev, EV_KEY, code, 0);
+ input_handle_event(dev, EV_KEY, code, 0);
need_sync = true;
}
-
- if (need_sync)
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
-
- memset(dev->key, 0, sizeof(dev->key));
}
+
+ return need_sync;
}
/*
@@ -793,7 +766,8 @@ static void input_disconnect_device(struct input_dev *dev)
* generate events even after we done here but they will not
* reach any handlers.
*/
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
list_for_each_entry(handle, &dev->h_list, d_node)
handle->open = 0;
@@ -1004,12 +978,16 @@ int input_set_keycode(struct input_dev *dev,
} else if (test_bit(EV_KEY, dev->evbit) &&
!is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(old_keycode, dev->key)) {
- struct input_value vals[] = {
- { EV_KEY, old_keycode, 0 },
- input_value_sync
- };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
+ /*
+ * We have to use input_event_dispose() here directly instead
+ * of input_handle_event() because the key we want to release
+ * here is considered no longer supported by the device and
+ * input_handle_event() will ignore it.
+ */
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
+ EV_KEY, old_keycode, 0);
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
+ EV_SYN, SYN_REPORT, 1);
}
out:
@@ -1784,7 +1762,8 @@ void input_reset_device(struct input_dev *dev)
spin_lock_irqsave(&dev->event_lock, flags);
input_dev_toggle(dev, true);
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irqrestore(&dev->event_lock, flags);
mutex_unlock(&dev->mutex);
@@ -1806,7 +1785,9 @@ static int input_inhibit_device(struct input_dev *dev)
}
spin_lock_irq(&dev->event_lock);
+ input_mt_release_slots(dev);
input_dev_release_keys(dev);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
input_dev_toggle(dev, false);
spin_unlock_irq(&dev->event_lock);
@@ -1857,7 +1838,8 @@ static int input_dev_suspend(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
@@ -1891,7 +1873,8 @@ static int input_dev_freeze(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irq(&input_dev->event_lock);
@@ -2259,6 +2242,34 @@ static void devm_input_device_unregister(struct device *dev, void *res)
__input_unregister_device(input);
}
+/*
+ * Generate software autorepeat event. Note that we take
+ * dev->event_lock here to avoid racing with input_event
+ * which may cause keys to get "stuck".
+ */
+static void input_repeat_key(struct timer_list *t)
+{
+ struct input_dev *dev = from_timer(dev, t, timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (!dev->inhibited &&
+ test_bit(dev->repeat_key, dev->key) &&
+ is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
+
+ input_set_timestamp(dev, ktime_get());
+ input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
+
+ if (dev->rep[REP_PERIOD])
+ mod_timer(&dev->timer, jiffies +
+ msecs_to_jiffies(dev->rep[REP_PERIOD]));
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* input_enable_softrepeat - enable software autorepeat
* @dev: input device
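The refactored input_event_dispose() above acts purely on disposition bits: INPUT_PASS_TO_DEVICE forwards to the device callback, INPUT_PASS_TO_HANDLERS queues the value, and INPUT_FLUSH (used in the set_keycode hunk) pushes the queue out. A compact sketch of that flag-driven dispatch; the HANDLERS/DEVICE values match the #defines shown above, the FLUSH value is assumed for this sketch:

    #include <stdio.h>

    #define INPUT_PASS_TO_HANDLERS 1        /* matches the #define shown above */
    #define INPUT_PASS_TO_DEVICE   2        /* matches the #define shown above */
    #define INPUT_FLUSH            8        /* value assumed for this sketch */

    static void dispose(int disposition, int type, int code, int value)
    {
            if (disposition & INPUT_PASS_TO_DEVICE)
                    printf("-> device callback: %d/%d=%d\n", type, code, value);
            if (disposition & INPUT_PASS_TO_HANDLERS)
                    printf("-> queue for handlers: %d/%d=%d\n", type, code, value);
            if (disposition & INPUT_FLUSH)
                    printf("-> flush queued values to handlers\n");
    }

    int main(void)
    {
            /* A key press goes to both the device and the handlers... */
            dispose(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE, 1, 30, 1);
            /* ...and the following SYN_REPORT flushes the queue. */
            dispose(INPUT_PASS_TO_HANDLERS | INPUT_FLUSH, 0, 0, 1);
            return 0;
    }

Separating "compute the disposition" from "act on it" is what allows input_set_keycode() to dispose of a key the device no longer advertises, which input_get_disposition() would otherwise reject.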
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index 78ebca7d400a..e0cfdc84763f 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -222,13 +222,6 @@ static int adc_joystick_probe(struct platform_device *pdev)
if (error)
return error;
- input_set_drvdata(input, joy);
- error = input_register_device(input);
- if (error) {
- dev_err(dev, "Unable to register input device\n");
- return error;
- }
-
joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle, joy);
if (IS_ERR(joy->buffer)) {
dev_err(dev, "Unable to allocate callback buffer\n");
@@ -241,6 +234,14 @@ static int adc_joystick_probe(struct platform_device *pdev)
return error;
}
+ input_set_drvdata(input, joy);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Unable to register input device\n");
+ return error;
+ }
+
return 0;
}
diff --git a/drivers/input/joystick/sensehat-joystick.c b/drivers/input/joystick/sensehat-joystick.c
index 5ad1fe4ff496..a84df39d3b2f 100644
--- a/drivers/input/joystick/sensehat-joystick.c
+++ b/drivers/input/joystick/sensehat-joystick.c
@@ -98,10 +98,8 @@ static int sensehat_joystick_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Could not retrieve interrupt request");
+ if (irq < 0)
return irq;
- }
error = devm_request_threaded_irq(&pdev->dev, irq,
NULL, sensehat_joystick_report,
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4ea79db8f134..a20ee693b22b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -795,7 +795,7 @@ config KEYBOARD_MT6779
config KEYBOARD_MTK_PMIC
tristate "MediaTek PMIC keys support"
- depends on MFD_MT6397
+ depends on MFD_MT6397 || COMPILE_TEST
help
Say Y here if you want to use the pmic keys (powerkey/homekey).
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1592da4de336..1a1a05d7cd42 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -8,17 +8,19 @@
* Copyright (C) 2008-2010 Analog Devices Inc.
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/gpio/driver.h>
+#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include <linux/platform_data/adp5588.h>
@@ -36,18 +38,18 @@
* asserted.
*/
#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
+#define WA_DELAYED_READOUT_TIME 25
struct adp5588_kpad {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work work;
+ ktime_t irq_time;
unsigned long delay;
unsigned short keycode[ADP5588_KEYMAPSIZE];
const struct adp5588_gpi_map *gpimap;
unsigned short gpimapsize;
#ifdef CONFIG_GPIOLIB
unsigned char gpiomap[ADP5588_MAXGPIO];
- bool export_gpio;
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
u8 dat_out[3];
@@ -179,6 +181,21 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
+static void adp5588_gpio_do_teardown(void *_kpad)
+{
+ struct adp5588_kpad *kpad = _kpad;
+ struct device *dev = &kpad->client->dev;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
+ const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ int error;
+
+ error = gpio_data->teardown(kpad->client,
+ kpad->gc.base, kpad->gc.ngpio,
+ gpio_data->context);
+ if (error)
+ dev_warn(&kpad->client->dev, "teardown failed %d\n", error);
+}
+
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
@@ -195,8 +212,6 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
return 0;
}
- kpad->export_gpio = true;
-
kpad->gc.direction_input = adp5588_gpio_direction_input;
kpad->gc.direction_output = adp5588_gpio_direction_output;
kpad->gc.get = adp5588_gpio_get_value;
@@ -210,9 +225,9 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
mutex_init(&kpad->gpio_lock);
- error = gpiochip_add_data(&kpad->gc, kpad);
+ error = devm_gpiochip_add_data(dev, &kpad->gc, kpad);
if (error) {
- dev_err(dev, "gpiochip_add failed, err: %d\n", error);
+ dev_err(dev, "gpiochip_add failed: %d\n", error);
return error;
}
@@ -227,41 +242,24 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.base, kpad->gc.ngpio,
gpio_data->context);
if (error)
- dev_warn(dev, "setup failed, %d\n", error);
+ dev_warn(dev, "setup failed: %d\n", error);
}
- return 0;
-}
-
-static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
- struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
- int error;
-
- if (!kpad->export_gpio)
- return;
-
if (gpio_data->teardown) {
- error = gpio_data->teardown(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
+ error = devm_add_action(dev, adp5588_gpio_do_teardown, kpad);
if (error)
- dev_warn(dev, "teardown failed %d\n", error);
+ dev_warn(dev, "failed to schedule teardown: %d\n",
+ error);
}
- gpiochip_remove(&kpad->gc);
+ return 0;
}
+
#else
static inline int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
return 0;
}
-
-static inline void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
-}
#endif
static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
@@ -289,13 +287,36 @@ static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
}
}
-static void adp5588_work(struct work_struct *work)
+static irqreturn_t adp5588_hard_irq(int irq, void *handle)
{
- struct adp5588_kpad *kpad = container_of(work,
- struct adp5588_kpad, work.work);
+ struct adp5588_kpad *kpad = handle;
+
+ kpad->irq_time = ktime_get();
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t adp5588_thread_irq(int irq, void *handle)
+{
+ struct adp5588_kpad *kpad = handle;
struct i2c_client *client = kpad->client;
+ ktime_t target_time, now;
+ unsigned long delay;
int status, ev_cnt;
+ /*
+ * Readout needs to wait for at least 25ms after the notification
+ * for REVID < 4.
+ */
+ if (kpad->delay) {
+ target_time = ktime_add_ms(kpad->irq_time, kpad->delay);
+ now = ktime_get();
+ if (ktime_before(now, target_time)) {
+ delay = ktime_to_us(ktime_sub(target_time, now));
+ usleep_range(delay, delay + 1000);
+ }
+ }
+
status = adp5588_read(client, INT_STAT);
if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
@@ -308,20 +329,8 @@ static void adp5588_work(struct work_struct *work)
input_sync(kpad->input);
}
}
- adp5588_write(client, INT_STAT, status); /* Status is W1C */
-}
-
-static irqreturn_t adp5588_irq(int irq, void *handle)
-{
- struct adp5588_kpad *kpad = handle;
- /*
- * use keventd context to read the event fifo registers
- * Schedule readout at least 25ms after notification for
- * REVID < 4
- */
-
- schedule_delayed_work(&kpad->work, kpad->delay);
+ adp5588_write(client, INT_STAT, status); /* Status is W1C */
return IRQ_HANDLED;
}
@@ -496,30 +505,27 @@ static int adp5588_probe(struct i2c_client *client,
return -EINVAL;
}
- kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
- input = input_allocate_device();
- if (!kpad || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
kpad->client = client;
kpad->input = input;
- INIT_DELAYED_WORK(&kpad->work, adp5588_work);
ret = adp5588_read(client, DEV_ID);
- if (ret < 0) {
- error = ret;
- goto err_free_mem;
- }
+ if (ret < 0)
+ return ret;
revid = (u8) ret & ADP5588_DEVICE_ID_MASK;
if (WA_DELAYED_READOUT_REVID(revid))
- kpad->delay = msecs_to_jiffies(30);
+ kpad->delay = msecs_to_jiffies(WA_DELAYED_READOUT_TIME);
input->name = client->name;
input->phys = "adp5588-keys/input0";
- input->dev.parent = &client->dev;
input_set_drvdata(input, kpad);
@@ -556,95 +562,63 @@ static int adp5588_probe(struct i2c_client *client,
error = input_register_device(input);
if (error) {
- dev_err(&client->dev, "unable to register input device\n");
- goto err_free_mem;
+ dev_err(&client->dev, "unable to register input device: %d\n",
+ error);
+ return error;
}
- error = request_irq(client->irq, adp5588_irq,
- IRQF_TRIGGER_FALLING,
- client->dev.driver->name, kpad);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ adp5588_hard_irq, adp5588_thread_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, kpad);
if (error) {
- dev_err(&client->dev, "irq %d busy?\n", client->irq);
- goto err_unreg_dev;
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
}
error = adp5588_setup(client);
if (error)
- goto err_free_irq;
+ return error;
if (kpad->gpimapsize)
adp5588_report_switch_state(kpad);
error = adp5588_gpio_add(kpad);
if (error)
- goto err_free_irq;
-
- device_init_wakeup(&client->dev, 1);
- i2c_set_clientdata(client, kpad);
+ return error;
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
-
- err_free_irq:
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- err_unreg_dev:
- input_unregister_device(input);
- input = NULL;
- err_free_mem:
- input_free_device(input);
- kfree(kpad);
-
- return error;
}
static int adp5588_remove(struct i2c_client *client)
{
- struct adp5588_kpad *kpad = i2c_get_clientdata(client);
-
adp5588_write(client, CFG, 0);
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- input_unregister_device(kpad->input);
- adp5588_gpio_remove(kpad);
- kfree(kpad);
+ /* all resources will be freed by devm */
return 0;
}
-#ifdef CONFIG_PM
-static int adp5588_suspend(struct device *dev)
+static int __maybe_unused adp5588_suspend(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
+ struct i2c_client *client = to_i2c_client(dev);
disable_irq(client->irq);
- cancel_delayed_work_sync(&kpad->work);
-
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
return 0;
}
-static int adp5588_resume(struct device *dev)
+static int __maybe_unused adp5588_resume(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
+ struct i2c_client *client = to_i2c_client(dev);
enable_irq(client->irq);
return 0;
}
-static const struct dev_pm_ops adp5588_dev_pm_ops = {
- .suspend = adp5588_suspend,
- .resume = adp5588_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
static const struct i2c_device_id adp5588_id[] = {
{ "adp5588-keys", 0 },
@@ -656,9 +630,7 @@ MODULE_DEVICE_TABLE(i2c, adp5588_id);
static struct i2c_driver adp5588_driver = {
.driver = {
.name = KBUILD_MODNAME,
-#ifdef CONFIG_PM
.pm = &adp5588_dev_pm_ops,
-#endif
},
.probe = adp5588_probe,
.remove = adp5588_remove,
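The adp5588 conversion replaces the delayed workqueue with a threaded IRQ: the hard handler only records ktime_get(), and the thread then sleeps just for whatever remains of the 25 ms readout window. The same arithmetic as a standalone sketch using CLOCK_MONOTONIC (hypothetical names, not the driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define READOUT_DELAY_MS 25     /* WA_DELAYED_READOUT_TIME analog */

    static int64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    int main(void)
    {
            /* The hard IRQ handler would just record the timestamp... */
            int64_t irq_time = now_ns();

            /* ...simulate time spent waking up the IRQ thread. */
            usleep(5000);

            /* Threaded handler: sleep only for what is left of the window. */
            int64_t target = irq_time + READOUT_DELAY_MS * 1000000LL;
            int64_t now = now_ns();

            if (now < target)
                    usleep((useconds_t)((target - now) / 1000));

            printf("readout %lld ms after irq\n",
                   (long long)((now_ns() - irq_time) / 1000000));
            return 0;
    }

Measuring from the interrupt rather than scheduling a fixed 30 ms delay means fast systems pay only the remainder of the window instead of the full worst case.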
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index cc73a149da28..c14136b733a9 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -12,6 +12,7 @@
// expensive.
#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/input.h>
@@ -518,6 +519,50 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev,
return 0;
}
+static void cros_ec_keyb_parse_vivaldi_physmap(struct cros_ec_keyb *ckdev)
+{
+ u32 *physmap = ckdev->vdata.function_row_physmap;
+ unsigned int row, col, scancode;
+ int n_physmap;
+ int error;
+ int i;
+
+ n_physmap = device_property_count_u32(ckdev->dev,
+ "function-row-physmap");
+ if (n_physmap <= 0)
+ return;
+
+ if (n_physmap >= VIVALDI_MAX_FUNCTION_ROW_KEYS) {
+ dev_warn(ckdev->dev,
+ "only up to %d top row keys is supported (%d specified)\n",
+ VIVALDI_MAX_FUNCTION_ROW_KEYS, n_physmap);
+ n_physmap = VIVALDI_MAX_FUNCTION_ROW_KEYS;
+ }
+
+ error = device_property_read_u32_array(ckdev->dev,
+ "function-row-physmap",
+ physmap, n_physmap);
+ if (error) {
+ dev_warn(ckdev->dev,
+ "failed to parse function-row-physmap property: %d\n",
+ error);
+ return;
+ }
+
+ /*
+ * Convert (in place) from row/column encoding to matrix "scancode"
+ * used by the driver.
+ */
+ for (i = 0; i < n_physmap; i++) {
+ row = KEY_ROW(physmap[i]);
+ col = KEY_COL(physmap[i]);
+ scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ physmap[i] = scancode;
+ }
+
+ ckdev->vdata.num_function_row_keys = n_physmap;
+}
+
/**
* cros_ec_keyb_register_matrix - Register matrix keys
*
@@ -534,11 +579,6 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
struct input_dev *idev;
const char *phys;
int err;
- struct property *prop;
- const __be32 *p;
- u32 *physmap;
- u32 key_pos;
- unsigned int row, col, scancode, n_physmap;
err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
if (err)
@@ -573,7 +613,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
idev->id.product = 0;
idev->dev.parent = dev;
- ckdev->ghost_filter = of_property_read_bool(dev->of_node,
+ ckdev->ghost_filter = device_property_read_bool(dev,
"google,needs-ghost-filter");
err = matrix_keypad_build_keymap(NULL, NULL, ckdev->rows, ckdev->cols,
@@ -589,22 +629,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
input_set_drvdata(idev, ckdev);
ckdev->idev = idev;
cros_ec_keyb_compute_valid_keys(ckdev);
-
- physmap = ckdev->vdata.function_row_physmap;
- n_physmap = 0;
- of_property_for_each_u32(dev->of_node, "function-row-physmap",
- prop, p, key_pos) {
- if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
- dev_warn(dev, "Only support up to %d top row keys\n",
- VIVALDI_MAX_FUNCTION_ROW_KEYS);
- break;
- }
- row = KEY_ROW(key_pos);
- col = KEY_COL(key_pos);
- scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
- physmap[n_physmap++] = scancode;
- }
- ckdev->vdata.num_function_row_keys = n_physmap;
+ cros_ec_keyb_parse_vivaldi_physmap(ckdev);
err = input_register_device(ckdev->idev);
if (err) {
@@ -653,14 +678,19 @@ static const struct attribute_group cros_ec_keyb_attr_group = {
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
- struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *ec;
struct device *dev = &pdev->dev;
struct cros_ec_keyb *ckdev;
bool buttons_switches_only = device_get_match_data(dev);
int err;
- if (!dev->of_node)
- return -ENODEV;
+ /*
+ * If the parent ec device has not been probed yet, defer the probe of
+ * this keyboard/button driver until later.
+ */
+ ec = dev_get_drvdata(pdev->dev.parent);
+ if (!ec)
+ return -EPROBE_DEFER;
ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
@@ -713,6 +743,14 @@ static int cros_ec_keyb_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ec_keyb_acpi_match[] = {
+ { "GOOG0007", true },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_keyb_acpi_match);
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_keyb_of_match[] = {
{ .compatible = "google,cros-ec-keyb" },
@@ -730,6 +768,7 @@ static struct platform_driver cros_ec_keyb_driver = {
.driver = {
.name = "cros-ec-keyb",
.of_match_table = of_match_ptr(cros_ec_keyb_of_match),
+ .acpi_match_table = ACPI_PTR(cros_ec_keyb_acpi_match),
.pm = &cros_ec_keyb_pm_ops,
},
};
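cros_ec_keyb_parse_vivaldi_physmap() above converts each function-row-physmap entry from row/column encoding to a matrix scancode in place. A sketch of that conversion with local copies of the matrix-keymap helpers (packing as in include/linux/input/matrix_keypad.h; the sample values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Local copies of the matrix-keymap helpers. */
    #define KEY_ROW(k)      (((k) >> 24) & 0xff)
    #define KEY_COL(k)      (((k) >> 16) & 0xff)
    #define MATRIX_SCAN_CODE(row, col, row_shift)   (((row) << (row_shift)) + (col))

    int main(void)
    {
            /* Hypothetical physmap: keys at (row 0, col 2) and (row 3, col 5). */
            uint32_t physmap[] = { 0x00020000, 0x03050000 };
            unsigned int row_shift = 4;     /* get_count_order(n_cols) analog */
            unsigned int i;

            for (i = 0; i < sizeof(physmap) / sizeof(physmap[0]); i++) {
                    uint32_t row = KEY_ROW(physmap[i]);
                    uint32_t col = KEY_COL(physmap[i]);

                    /* Convert in place, as the driver does. */
                    physmap[i] = MATRIX_SCAN_CODE(row, col, row_shift);
                    printf("key %u -> scancode %u\n", i, (unsigned)physmap[i]);
            }
            return 0;
    }

Reading the whole array with device_property_read_u32_array() instead of of_property_for_each_u32() is also what makes the ACPI match added further down work, since generic properties are not tied to an of_node.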
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index 2e7c9187c10f..bf447bf598fb 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -17,6 +17,11 @@
#define MTK_KPD_DEBOUNCE 0x0018
#define MTK_KPD_DEBOUNCE_MASK GENMASK(13, 0)
#define MTK_KPD_DEBOUNCE_MAX_MS 256
+#define MTK_KPD_SEL 0x0020
+#define MTK_KPD_SEL_COL GENMASK(15, 10)
+#define MTK_KPD_SEL_ROW GENMASK(9, 4)
+#define MTK_KPD_SEL_COLMASK(c) GENMASK((c) + 9, 10)
+#define MTK_KPD_SEL_ROWMASK(r) GENMASK((r) + 3, 4)
#define MTK_KPD_NUM_MEMS 5
#define MTK_KPD_NUM_BITS 136 /* 4*32+8 MEM5 only use 8 BITS */
@@ -42,7 +47,7 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
const unsigned short *keycode = keypad->input_dev->keycode;
DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
- unsigned int bit_nr;
+ unsigned int bit_nr, key;
unsigned int row, col;
unsigned int scancode;
unsigned int row_shift = get_count_order(keypad->n_cols);
@@ -61,8 +66,10 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
if (bit_nr % 32 >= 16)
continue;
- row = bit_nr / 32;
- col = bit_nr % 32;
+ key = bit_nr / 32 * 16 + bit_nr % 32;
+ row = key / 9;
+ col = key % 9;
+
scancode = MATRIX_SCAN_CODE(row, col, row_shift);
/* 1: not pressed, 0: pressed */
pressed = !test_bit(bit_nr, new_state);
@@ -159,6 +166,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
(debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_ROW,
+ MTK_KPD_SEL_ROWMASK(keypad->n_rows));
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
+ MTK_KPD_SEL_COLMASK(keypad->n_cols));
+
keypad->clk = devm_clk_get(&pdev->dev, "kpd");
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
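The mt6779 fix first flattens the register bit number into a linear key index, because each 32-bit status word carries only 16 usable bits, and only then splits that index over a 9-column matrix. The decode in isolation (the sample bit numbers are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int samples[] = { 0, 8, 32, 40, 70 };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    unsigned int bit_nr = samples[i];

                    if (bit_nr % 32 >= 16)  /* upper half of each word is unused */
                            continue;

                    /* 16 usable bits per 32-bit word -> linear key index */
                    unsigned int key = bit_nr / 32 * 16 + bit_nr % 32;
                    unsigned int row = key / 9;     /* 9 columns per row */
                    unsigned int col = key % 9;

                    printf("bit %2u -> key %2u -> row %u col %u\n",
                           bit_nr, key, row, col);
            }
            return 0;
    }

The old code treated bit_nr / 32 as the row directly, which only agreed with the hardware layout for the first word.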
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index c31ab4368388..6404081253ea 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -18,17 +18,9 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define MTK_PMIC_PWRKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_PWRKEY_RST_EN_SHIFT 6
-#define MTK_PMIC_HOMEKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_HOMEKEY_RST_EN_SHIFT 5
-#define MTK_PMIC_RST_DU_MASK 0x3
-#define MTK_PMIC_RST_DU_SHIFT 8
-
-#define MTK_PMIC_PWRKEY_RST \
- (MTK_PMIC_PWRKEY_RST_EN_MASK << MTK_PMIC_PWRKEY_RST_EN_SHIFT)
-#define MTK_PMIC_HOMEKEY_RST \
- (MTK_PMIC_HOMEKEY_RST_EN_MASK << MTK_PMIC_HOMEKEY_RST_EN_SHIFT)
+#define MTK_PMIC_RST_DU_MASK GENMASK(9, 8)
+#define MTK_PMIC_PWRKEY_RST BIT(6)
+#define MTK_PMIC_HOMEKEY_RST BIT(5)
#define MTK_PMIC_PWRKEY_INDEX 0
#define MTK_PMIC_HOMEKEY_INDEX 1
@@ -39,50 +31,58 @@ struct mtk_pmic_keys_regs {
u32 deb_mask;
u32 intsel_reg;
u32 intsel_mask;
+ u32 rst_en_mask;
};
#define MTK_PMIC_KEYS_REGS(_deb_reg, _deb_mask, \
- _intsel_reg, _intsel_mask) \
+ _intsel_reg, _intsel_mask, _rst_mask) \
{ \
.deb_reg = _deb_reg, \
.deb_mask = _deb_mask, \
.intsel_reg = _intsel_reg, \
.intsel_mask = _intsel_mask, \
+ .rst_en_mask = _rst_mask, \
}
struct mtk_pmic_regs {
const struct mtk_pmic_keys_regs keys_regs[MTK_PMIC_MAX_KEY_COUNT];
u32 pmic_rst_reg;
+ u32 rst_lprst_mask; /* Long-press reset timeout bitmask */
};
static const struct mtk_pmic_regs mt6397_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_CHRSTATUS,
- 0x8, MT6397_INT_RSV, 0x10),
+ 0x8, MT6397_INT_RSV, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_OCSTATUS2,
- 0x10, MT6397_INT_RSV, 0x8),
+ 0x10, MT6397_INT_RSV, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6397_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6323_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x2, MT6323_INT_MISC_CON, 0x10),
+ 0x2, MT6323_INT_MISC_CON, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x4, MT6323_INT_MISC_CON, 0x8),
+ 0x4, MT6323_INT_MISC_CON, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6323_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6358_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+ 0x2, MT6358_PSC_TOP_INT_CON0, 0x5,
+ MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+ 0x8, MT6358_PSC_TOP_INT_CON0, 0xa,
+ MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6358_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
struct mtk_pmic_keys_info {
@@ -108,53 +108,49 @@ enum mtk_pmic_keys_lp_mode {
};
static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
- u32 pmic_rst_reg)
+ const struct mtk_pmic_regs *regs)
{
- int ret;
+ const struct mtk_pmic_keys_regs *kregs_home, *kregs_pwr;
u32 long_press_mode, long_press_debounce;
+ u32 value, mask;
+ int error;
+
+ kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
+ kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
- ret = of_property_read_u32(keys->dev->of_node,
- "power-off-time-sec", &long_press_debounce);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
+ &long_press_debounce);
+ if (error)
long_press_debounce = 0;
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_RST_DU_MASK << MTK_PMIC_RST_DU_SHIFT,
- long_press_debounce << MTK_PMIC_RST_DU_SHIFT);
+ mask = regs->rst_lprst_mask;
+ value = long_press_debounce << (ffs(regs->rst_lprst_mask) - 1);
- ret = of_property_read_u32(keys->dev->of_node,
- "mediatek,long-press-mode", &long_press_mode);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node,
+ "mediatek,long-press-mode",
+ &long_press_mode);
+ if (error)
long_press_mode = LP_DISABLE;
switch (long_press_mode) {
- case LP_ONEKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
- break;
case LP_TWOKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- MTK_PMIC_HOMEKEY_RST);
- break;
+ value |= kregs_home->rst_en_mask;
+ fallthrough;
+
+ case LP_ONEKEY:
+ value |= kregs_pwr->rst_en_mask;
+ fallthrough;
+
case LP_DISABLE:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- 0);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
+ mask |= kregs_home->rst_en_mask;
+ mask |= kregs_pwr->rst_en_mask;
break;
+
default:
break;
}
+
+ regmap_update_bits(keys->regmap, regs->pmic_rst_reg, mask, value);
}
static irqreturn_t mtk_pmic_keys_irq_handler_thread(int irq, void *data)
@@ -358,7 +354,7 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
return error;
}
- mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs->pmic_rst_reg);
+ mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs);
platform_set_drvdata(pdev, keys);
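The reworked mtk_pmic_keys_lp_reset_setup() builds one mask/value pair and issues a single regmap_update_bits(): LP_TWOKEY falls through to LP_ONEKEY to accumulate enable bits, every case widens the mask so stale enable bits get cleared, and ffs() places the debounce value inside its bitmask. A self-contained sketch of the same accumulation (register and field values hypothetical):

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    #define RST_DU_MASK     0x0300  /* GENMASK(9, 8) analog */
    #define PWRKEY_RST      (1 << 6)
    #define HOMEKEY_RST     (1 << 5)

    enum { LP_DISABLE, LP_ONEKEY, LP_TWOKEY };

    /* Stand-in for regmap_update_bits(): reg = (reg & ~mask) | value. */
    static unsigned int update_bits(unsigned int reg, unsigned int mask,
                                    unsigned int value)
    {
            return (reg & ~mask) | (value & mask);
    }

    int main(void)
    {
            unsigned int mode = LP_TWOKEY, debounce = 2;
            unsigned int mask = RST_DU_MASK;
            /* ffs() finds the field's lowest set bit -> shift amount */
            unsigned int value = debounce << (ffs(RST_DU_MASK) - 1);

            switch (mode) {
            case LP_TWOKEY:
                    value |= HOMEKEY_RST;
                    /* fall through */
            case LP_ONEKEY:
                    value |= PWRKEY_RST;
                    /* fall through */
            case LP_DISABLE:
                    /* widen the mask so unused enable bits are cleared */
                    mask |= HOMEKEY_RST | PWRKEY_RST;
                    break;
            }

            printf("reg = 0x%04x\n", update_bits(0xffff, mask, value));
            return 0;
    }

Collapsing the old six regmap_update_bits() calls into one also means the register is never observed in a half-configured state.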
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 8a7ce41b8c56..ee9d04a3f0d5 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -179,11 +179,9 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
int error;
u64 keys;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return IRQ_NONE;
- }
low = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
high = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
@@ -207,11 +205,9 @@ static int omap4_keypad_open(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return error;
- }
disable_irq(keypad_data->irq);
@@ -254,9 +250,10 @@ static void omap4_keypad_close(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0)
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
+ dev_err(dev, "%s: pm_runtime_resume_and_get() failed: %d\n",
+ __func__, error);
disable_irq(keypad_data->irq);
omap4_keypad_stop(keypad_data);
@@ -392,10 +389,9 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* Enable clocks for the keypad module so that we can read
* revision register.
*/
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error) {
+ dev_err(dev, "pm_runtime_resume_and_get() failed\n");
return error;
}
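The omap4-keypad hunks replace the pm_runtime_get_sync()/pm_runtime_put_noidle() dance with pm_runtime_resume_and_get(), which keeps the usage count balanced on failure by itself. A toy model of why the helper is safer (plain counters stand in for the PM core; none of this is the real API):

    #include <stdio.h>

    static int usage_count;
    static int resume_ok;           /* 0: simulate a resume failure */

    /* Stand-ins for the runtime-PM calls; the real ones live in the PM core. */
    static int get_sync(void)
    {
            usage_count++;                  /* counted even when resume fails */
            return resume_ok ? 0 : -11;     /* -EAGAIN-style error */
    }

    static void put_noidle(void) { usage_count--; }

    static int resume_and_get(void)
    {
            int ret = get_sync();

            if (ret < 0)
                    put_noidle();           /* helper undoes the count itself */
            return ret;
    }

    int main(void)
    {
            if (resume_and_get())
                    printf("failed, usage_count=%d (still balanced)\n",
                           usage_count);
            return 0;
    }

Forgetting the put_noidle() in even one error path leaks a usage-count reference and keeps the device powered forever, which is exactly the bug class the helper removes.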
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 6b4138771a3f..b2e8097a2e6d 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -40,7 +40,6 @@
#define IQS7222_SLDR_SETUP_2_RES_MASK GENMASK(15, 8)
#define IQS7222_SLDR_SETUP_2_RES_SHIFT 8
#define IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK GENMASK(7, 0)
-#define IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK GENMASK(9, 0)
#define IQS7222_GPIO_SETUP_0_GPIO_EN BIT(0)
@@ -54,6 +53,9 @@
#define IQS7222_SYS_SETUP_ACK_RESET BIT(0)
#define IQS7222_EVENT_MASK_ATI BIT(12)
+#define IQS7222_EVENT_MASK_SLDR BIT(10)
+#define IQS7222_EVENT_MASK_TOUCH BIT(1)
+#define IQS7222_EVENT_MASK_PROX BIT(0)
#define IQS7222_COMMS_HOLD BIT(0)
#define IQS7222_COMMS_ERROR 0xEEEE
@@ -92,11 +94,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_GPIO,
IQS7222_REG_GRP_SYS,
@@ -135,12 +137,12 @@ struct iqs7222_event_desc {
static const struct iqs7222_event_desc iqs7222_kp_events[] = {
{
.name = "event-prox",
- .enable = BIT(0),
+ .enable = IQS7222_EVENT_MASK_PROX,
.reg_key = IQS7222_REG_KEY_PROX,
},
{
.name = "event-touch",
- .enable = BIT(1),
+ .enable = IQS7222_EVENT_MASK_TOUCH,
.reg_key = IQS7222_REG_KEY_TOUCH,
},
};
@@ -556,13 +558,6 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
.label = "current reference trim",
},
{
- .name = "azoteq,rf-filt-enable",
- .reg_grp = IQS7222_REG_GRP_GLBL,
- .reg_offset = 0,
- .reg_shift = 15,
- .reg_width = 1,
- },
- {
.name = "azoteq,max-counts",
.reg_grp = IQS7222_REG_GRP_GLBL,
.reg_offset = 0,
@@ -1272,9 +1267,22 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
struct i2c_client *client = iqs7222->client;
ktime_t ati_timeout;
u16 sys_status = 0;
- u16 sys_setup = iqs7222->sys_setup[0] & ~IQS7222_SYS_SETUP_ACK_RESET;
+ u16 sys_setup;
int error, i;
+ /*
+ * The reserved fields of the system setup register may have changed
+ * as a result of other registers having been written. As such, read
+ * the register's latest value to avoid unexpected behavior when the
+ * register is written in the loop that follows.
+ */
+ error = iqs7222_read_word(iqs7222, IQS7222_SYS_SETUP, &sys_setup);
+ if (error)
+ return error;
+
+ sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+
for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
/*
* Trigger ATI from streaming and normal-power modes so that
@@ -1299,12 +1307,15 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
if (error)
return error;
- if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
- continue;
+ if (sys_status & IQS7222_SYS_STATUS_RESET)
+ return 0;
if (sys_status & IQS7222_SYS_STATUS_ATI_ERROR)
break;
+ if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
+ continue;
+
/*
* Use stream-in-touch mode if either slider reports
* absolute position.
@@ -1321,7 +1332,7 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
dev_err(&client->dev,
"ATI attempt %d of %d failed with status 0x%02X, %s\n",
i + 1, IQS7222_NUM_RETRIES, (u8)sys_status,
- i < IQS7222_NUM_RETRIES ? "retrying..." : "stopping");
+ i + 1 < IQS7222_NUM_RETRIES ? "retrying" : "stopping");
}
return -ETIMEDOUT;
@@ -1334,6 +1345,34 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int error, i, j, k;
/*
+ * Acknowledge reset before writing any registers in case the device
+ * suffers a spurious reset during initialization. Because this step
+ * may change the reserved fields of the second filter beta register,
+ * its cache must be updated.
+ *
+ * Writing the second filter beta register, in turn, may clobber the
+ * system status register. As such, the filter beta register pair is
+ * written first to protect against this hazard.
+ */
+ if (dir == WRITE) {
+ u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
+ u16 filt_setup;
+
+ error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
+ iqs7222->sys_setup[0] |
+ IQS7222_SYS_SETUP_ACK_RESET);
+ if (error)
+ return error;
+
+ error = iqs7222_read_word(iqs7222, reg, &filt_setup);
+ if (error)
+ return error;
+
+ iqs7222->filt_setup[1] &= GENMASK(7, 0);
+ iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
+ }
+
+ /*
* Take advantage of the stop-bit disable function, if available, to
* save the trouble of having to reopen a communication window after
* each burst read or write.
@@ -1957,8 +1996,8 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
int ext_chan = rounddown(num_chan, 10);
int count, error, reg_offset, i;
+ u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
- u16 *sys_setup = iqs7222->sys_setup;
unsigned int chan_sel[4], val;
error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
@@ -2003,7 +2042,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
reg_offset = dev_desc->sldr_res < U16_MAX ? 0 : 1;
sldr_setup[0] |= count;
- sldr_setup[3 + reg_offset] &= ~IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK;
+ sldr_setup[3 + reg_offset] &= ~GENMASK(ext_chan - 1, 0);
for (i = 0; i < ARRAY_SIZE(chan_sel); i++) {
sldr_setup[5 + reg_offset + i] = 0;
@@ -2081,17 +2120,19 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
sldr_setup[0] |= dev_desc->wheel_enable;
}
+ /*
+ * The absence of a register offset makes it safe to assume the device
+ * supports gestures, each of which is first disabled until explicitly
+ * enabled.
+ */
+ if (!reg_offset)
+ for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++)
+ sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
+
for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) {
const char *event_name = iqs7222_sl_events[i].name;
struct fwnode_handle *event_node;
- /*
- * The absence of a register offset means the remaining fields
- * in the group represent gesture settings.
- */
- if (iqs7222_sl_events[i].enable && !reg_offset)
- sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
-
event_node = fwnode_get_named_child_node(sldr_node, event_name);
if (!event_node)
continue;
@@ -2104,6 +2145,22 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
if (error)
return error;
+ /*
+ * The press/release event does not expose a direct GPIO link,
+ * but one can be emulated by tying each of the participating
+ * channels to the same GPIO.
+ */
+ error = iqs7222_gpio_select(iqs7222, event_node,
+ i ? iqs7222_sl_events[i].enable
+ : sldr_setup[3 + reg_offset],
+ i ? 1568 + sldr_index * 30
+ : sldr_setup[4 + reg_offset]);
+ if (error)
+ return error;
+
+ if (!reg_offset)
+ sldr_setup[9] |= iqs7222_sl_events[i].enable;
+
error = fwnode_property_read_u32(event_node, "linux,code",
&val);
if (error) {
@@ -2115,26 +2172,20 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
iqs7222->sl_code[sldr_index][i] = val;
input_set_capability(iqs7222->keypad, EV_KEY, val);
- /*
- * The press/release event is determined based on whether the
- * coordinate field reports 0xFFFF and has no explicit enable
- * control.
- */
- if (!iqs7222_sl_events[i].enable || reg_offset)
- continue;
-
- sldr_setup[9] |= iqs7222_sl_events[i].enable;
-
- error = iqs7222_gpio_select(iqs7222, event_node,
- iqs7222_sl_events[i].enable,
- 1568 + sldr_index * 30);
- if (error)
- return error;
-
if (!dev_desc->event_offset)
continue;
- sys_setup[dev_desc->event_offset] |= BIT(10 + sldr_index);
+ /*
+ * The press/release event is determined based on whether the
+ * coordinate field reports 0xFFFF; it relies solely on touch
+ * or proximity interrupts being unmasked.
+ */
+ if (i && !reg_offset)
+ *event_mask |= (IQS7222_EVENT_MASK_SLDR << sldr_index);
+ else if (sldr_setup[4 + reg_offset] == dev_desc->touch_link)
+ *event_mask |= IQS7222_EVENT_MASK_TOUCH;
+ else
+ *event_mask |= IQS7222_EVENT_MASK_PROX;
}
/*
@@ -2227,11 +2278,6 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
return error;
}
- sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
- sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
- sys_setup[0] |= IQS7222_SYS_SETUP_ACK_RESET;
-
return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS,
IQS7222_REG_KEY_NONE);
}
@@ -2299,29 +2345,37 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
input_report_abs(iqs7222->keypad, iqs7222->sl_axis[i],
sldr_pos);
- for (j = 0; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
- u16 mask = iqs7222_sl_events[j].mask;
- u16 val = iqs7222_sl_events[j].val;
+ input_report_key(iqs7222->keypad, iqs7222->sl_code[i][0],
+ sldr_pos < dev_desc->sldr_res);
- if (!iqs7222_sl_events[j].enable) {
- input_report_key(iqs7222->keypad,
- iqs7222->sl_code[i][j],
- sldr_pos < dev_desc->sldr_res);
- continue;
- }
+ /*
+ * A maximum resolution indicates the device does not support
+ * gestures, in which case the remaining fields are ignored.
+ */
+ if (dev_desc->sldr_res == U16_MAX)
+ continue;
- /*
- * The remaining offsets represent gesture state, and
- * are discarded in the case of IQS7222C because only
- * absolute position is reported.
- */
- if (num_stat < IQS7222_MAX_COLS_STAT)
- continue;
+ if (!(le16_to_cpu(status[1]) & IQS7222_EVENT_MASK_SLDR << i))
+ continue;
+
+ /*
+ * Skip the press/release event, as it does not have separate
+ * status fields and is handled separately.
+ */
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
+ u16 mask = iqs7222_sl_events[j].mask;
+ u16 val = iqs7222_sl_events[j].val;
input_report_key(iqs7222->keypad,
iqs7222->sl_code[i][j],
(state & mask) == val);
}
+
+ input_sync(iqs7222->keypad);
+
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++)
+ input_report_key(iqs7222->keypad,
+ iqs7222->sl_code[i][j], 0);
}
input_sync(iqs7222->keypad);
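The reordered checks in iqs7222_ati_trigger() encode a priority: an unexpected reset ends polling successfully (re-initialization takes over), an explicit ATI error aborts the attempt, and only then does "still active" keep the loop waiting. That ordering as a tiny classifier (bit positions hypothetical):

    #include <stdio.h>

    #define STATUS_RESET        (1 << 3)    /* bit positions assumed */
    #define STATUS_ATI_ERROR    (1 << 1)
    #define STATUS_ATI_ACTIVE   (1 << 0)

    /* Returns 1 when polling is done, -1 on error, 0 to keep waiting. */
    static int classify(unsigned int status)
    {
            if (status & STATUS_RESET)
                    return 1;       /* device reset: re-init path takes over */
            if (status & STATUS_ATI_ERROR)
                    return -1;      /* definite failure: retry the trigger */
            if (status & STATUS_ATI_ACTIVE)
                    return 0;       /* calibration still running: poll again */
            return 1;               /* idle and error-free: success */
    }

    int main(void)
    {
            unsigned int samples[] = { STATUS_ATI_ACTIVE,
                                       STATUS_ATI_ACTIVE | STATUS_RESET, 0 };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("status 0x%x -> %d\n", samples[i],
                           classify(samples[i]));
            return 0;
    }

Checking ATI_ACTIVE before RESET, as the old code did, could spin on a device that had already rebooted and would never clear the active bit.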
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 812edfced86e..0caaf3e64215 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -57,7 +57,7 @@ struct pip_app_resp_head {
* The value of data_status can be the first byte of data or
* the command status or the unsupported command code depending on the
* requested command code.
- */
+ */
u8 data_status;
} __packed;
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 23507fce3a2b..18ccbd45004a 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -41,7 +41,7 @@ struct gpio_mouse {
/*
* Timer function which is run every scan_ms ms when the device is opened.
- * The dev input variable is set to the the input_dev pointer.
+ * The dev input variable is set to the input_dev pointer.
*/
static void gpio_mouse_scan(struct input_dev *input)
{
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index a9065c6ab550..da2c67cb8642 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -350,6 +350,10 @@ static int __init gscps2_probe(struct parisc_device *dev)
ps2port->port = serio;
ps2port->padev = dev;
ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
+ if (!ps2port->addr) {
+ ret = -ENOMEM;
+ goto fail_nomem;
+ }
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 148a7c5fd0e2..4fbec7bbecca 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -67,25 +67,84 @@ static inline void i8042_write_command(int val)
#include <linux/dmi.h>
-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+#define SERIO_QUIRK_NOKBD BIT(0)
+#define SERIO_QUIRK_NOAUX BIT(1)
+#define SERIO_QUIRK_NOMUX BIT(2)
+#define SERIO_QUIRK_FORCEMUX BIT(3)
+#define SERIO_QUIRK_UNLOCK BIT(4)
+#define SERIO_QUIRK_PROBE_DEFER BIT(5)
+#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
+#define SERIO_QUIRK_RESET_NEVER BIT(7)
+#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DUMBKBD BIT(9)
+#define SERIO_QUIRK_NOLOOP BIT(10)
+#define SERIO_QUIRK_NOTIMEOUT BIT(11)
+#define SERIO_QUIRK_KBDRESET BIT(12)
+#define SERIO_QUIRK_DRITEK BIT(13)
+#define SERIO_QUIRK_NOPNP BIT(14)
+
+/* Quirk table for different mainboards. Options similar or identical to i8042
+ * module parameters.
+ * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored.
+ * This allows entries to overwrite vendor wide quirks on a per device basis.
+ * Where this is irrelevant, entries are sorted case sensitive by DMI_SYS_VENDOR
+ * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries.
+ */
+static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
{
- /*
- * Arima-Rioworks HDAMB -
- * AUX LOOP command does not raise AUX IRQ
- */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
- DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
- DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "G1S"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ /* Asus X450LCP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
},
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UM325UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ /*
+ * On some Asus laptops, just running self tests causes problems.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
},
{
/* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
@@ -94,585 +153,689 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
+ /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "G1S"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Acer Aspire 5710 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Embedded Box PC 3000 */
+ /* Acer Aspire 7738 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* OQO Model 01 */
+ /* Acer Aspire 5536 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ULI EV4873 - AUX LOOP does not work properly */
+ /*
+ * Acer Aspire 5738z
+ * Touchpad stops working in mux mode when disabled and then
+ * re-enabled with the touchpad enable/disable toggle hotkey
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Microsoft Virtual Machine */
+ /* Acer Aspire One 150 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Medion MAM 2070 */
+ /* Acer Aspire One 532h */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AO532h"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "blue"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M1022M netbook */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Wistron-based laptops need us to explicitly enable the 'Dritek
+ * keyboard extension' to make their extra keys start generating scancodes.
+ * Originally, this was just confined to older laptops, but a few Acer laptops
+ * have turned up in 2007 that also need this again.
+ */
{
+ /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
- { }
-};
-
-/*
- * Some Fujitsu notebooks are having trouble with touchpads if
- * active multiplexing mode is activated. Luckily they don't have
- * external PS/2 ports so we can safely disable it.
- * ... apparently some Toshibas don't like MUX mode either and
- * die horrible death on reboot.
- */
-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{
- /* Fujitsu Lifebook P7010/P7010D */
+ /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P7010 */
+ /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P5020D */
+ /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S2000 */
+ /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S6230 */
+ /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Acer TravelMate 2490 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook U745 */
+ /* Acer TravelMate 4280 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu T70H */
+ /* Amoi M636/A737 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Lifebook E4010 */
+ /* Compal HEL80I */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * No data is coming from the touchscreen unless KBC
- * is in legacy mode.
- */
- /* Panasonic CF-29 */
+ /* Advent 4211 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
+ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /*
- * HP Pavilion DV4017EA -
- * errors on MUX ports are reported without raising AUXDATA
- * causing "spurious NAK" messages.
- */
+ /* Dell Embedded Box PC 3000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * HP Pavilion ZT1000 -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell XPS M1530 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * HP Pavilion DV4270ca -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell Vostro 1510 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
+ /* Dell Vostro 1320 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1520 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Sharp Actius MM20 */
+ /* Entroware Proteus */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Fujitsu notebooks have trouble with touchpads if active
+ * multiplexing mode is activated. Luckily they don't have external
+ * PS/2 ports, so we can safely disable it.
+ * ... apparently some Toshibas don't like MUX mode either and
+ * die a horrible death on reboot.
+ */
{
- /* Sony Vaio FS-115b */
+ /* Fujitsu Lifebook P7010/P7010D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Sony Vaio FZ-240E -
- * reset and GET ID commands issued via KBD port are
- * sometimes being delivered to AUX3.
- */
+ /* Fujitsu Lifebook P5020D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Most (all?) VAIOs do not have external PS/2 ports nor
- * they implement active multiplexing properly, and
- * MUX discovery usually messes up keyboard/touchpad.
- */
+ /* Fujitsu Lifebook S2000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Amoi M636/A737 */
+ /* Fujitsu Lifebook S6230 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Lenovo 3000 n100 */
+ /* Fujitsu Lifebook T725 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Lenovo XiaoXin Air 12 */
+ /* Fujitsu Lifebook U745 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Fujitsu T70H */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5710 */
+ /* Fujitsu A544 laptop */
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Acer Aspire 7738 */
+ /* Fujitsu AH544 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Gericom Bellagio */
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* IBM 2656 */
+ /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Dell XPS M1530 */
+ /* Fujitsu Lifebook P7010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Compal HEL80I */
+ /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1510 */
+ /* Fujitsu-Siemens Lifebook E4010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5536 */
+ /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro V13 */
+ /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Newer HP Pavilion dv4 models */
+ /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Asus X450LCP */
+ /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Avatar AVIU-145A6 */
+ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* TUXEDO BU1406 */
+ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad to
+ * get it detected, initialised and finally working.
+ */
{
- /* Lenovo LaVie Z */
+ /* Gigabyte P35 v2 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /*
- * Acer Aspire 5738z
- * Touchpad stops working in mux mode when dis- + re-enabled
- * with the touchpad enable/disable toggle hotkey
- */
+ /* Gigabyte P34 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Entroware Proteus */
+ /* Gigabyte P57 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Gericom Bellagio */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /* Gigabyte M1022M netbook */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
- { }
-};
-
-static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
{
/*
- * Sony Vaio VGN-CS series require MUX or the touch sensor
- * buttons will disturb touchpad operation
+ * HP Pavilion DV4017EA -
+ * errors on MUX ports are reported without raising AUXDATA
+ * causing "spurious NAK" messages.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-/*
- * On some Asus laptops, just running self tests cause problems.
- */
-static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
{
+ /*
+ * HP Pavilion ZT1000 -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
},
- }, {
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * HP Pavilion DV4270ca -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
- /* MSI Wind U-100 */
+ /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* LG Electronics X110 */
+ /* IBM 2656 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "X110"),
- DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire One 150 */
+ /* Avatar AVIU-145A6 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Intel MBO Desktop D845PESV */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /*
+ * Intel NUC D54250WYK - does not have an i8042 controller but
+ * declares PS/2 devices in DSDT.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /* Lenovo 3000 n100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo XiaoXin Air 12 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo LaVie Z */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo Ideapad U455 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Lenovo ThinkPad L460 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Advent 4211 */
+ /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ /* LG Electronics X110 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "X110"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya Mini E1210 */
@@ -680,6 +843,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya E1222 */
@@ -687,331 +851,434 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Mivvy M310 */
+ /* MSI Wind U-100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP)
},
{
- /* Dell Vostro 1320 */
+ /*
+ * No data is coming from the touchscreen unless KBC
+ * is in legacy mode.
+ */
+ /* Panasonic CF-29 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1520 */
+ /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Dell Vostro 1720 */
+ /* Microsoft Virtual Machine */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo Ideapad U455 */
+ /* Medion MAM 2070 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad L460 */
+ /* TUXEDO BU1406 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ /* OQO Model 01 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Entroware Proteus */
+ /* Acer Aspire 5 A515 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-#ifdef CONFIG_PNP
-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
{
- /* Intel MBO Desktop D845PESV */
+ /* ULI EV4873 - AUX LOOP does not work properly */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
/*
- * Intel NUC D54250WYK - does not have i8042 controller but
- * declares PS/2 devices in DSDT.
+ * Arima-Rioworks HDAMB -
+ * AUX LOOP command does not raise AUX IRQ
*/
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
+ DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* MSI Wind U-100 */
+ /* Sharp Actius MM20 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5 A515 */
+ /*
+ * Sony Vaio FZ-240E -
+ * reset and GET ID commands issued via KBD port are
+ * sometimes being delivered to AUX3.
+ */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * Most (all?) VAIOs do not have external PS/2 ports nor
+ * they implement active multiplexing properly, and
+ * MUX discovery usually messes up keyboard/touchpad.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
{
+ /* Sony Vaio FS-115b */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /*
+ * Sony Vaio VGN-CS series require MUX or the touch sensor
+ * buttons will disturb touchpad operation
+ */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_FORCEMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-#endif
-
-static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
{
- /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend, fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port, so this can safely be set
+ * for all of them. These two are based on a Clevo design, but have the
+ * board_name changed.
+ */
{
- /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu A544 laptop */
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu AH544 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Mivvy M310 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
+ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad to
+ * get it detected, initialised and finally working.
+ */
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Fujitsu U574 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+ DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend, fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port, so this can safely be set
+ * for all of them.
+ * Clevo barebones come with board_vendor and/or system_vendor set to
+ * either the very generic string "Notebook" or a different value for
+ * each individual reseller. The only somewhat universal way to
+ * identify them is by board_name.
+ */
{
- /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some Wistron based laptops need us to explicitly enable the 'Dritek
- * keyboard extension' to make their extra keys start generating scancodes.
- * Originally, this was just confined to older laptops, but a few Acer laptops
- * have turned up in 2007 that also need this again.
- */
-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{
- /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /*
+ * At least one modern Clevo barebone has the touchpad connected both
+ * via the PS/2 and i2c interfaces. This causes a race condition between
+ * the psmouse and i2c-hid drivers. Since the full capability of the
+ * touchpad is available via the i2c interface and the device has no
+ * external PS/2 port, it is safe to just ignore all PS/2 mice to avoid
+ * this issue. The known affected device is the
+ * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of
+ * the two different dmi strings below. NS50MU is not a typo!
+ */
{
- /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
+ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 2490 */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 4280 */
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some laptops need keyboard reset before probing for the trackpad to get
- * it detected, initialised & finally work.
- */
-static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{
- /* Gigabyte P35 v2 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- {
- /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ {
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P34 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P57 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{ }
};
-static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+#ifdef CONFIG_PNP
+static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = {
{
- /* ASUS ZenBook UX425UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
},
{
- /* ASUS ZenBook UM325UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
},
{ }
};
+#endif
#endif /* CONFIG_X86 */
@@ -1167,11 +1434,6 @@ static int __init i8042_pnp_init(void)
bool pnp_data_busted = false;
int err;
-#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_nopnp_table))
- i8042_nopnp = true;
-#endif
-
if (i8042_nopnp) {
pr_info("PNP detection disabled\n");
return 0;
@@ -1275,6 +1537,59 @@ static inline int i8042_pnp_init(void) { return 0; }
static inline void i8042_pnp_exit(void) { }
#endif /* CONFIG_PNP */
+
+#ifdef CONFIG_X86
+static void __init i8042_check_quirks(void)
+{
+ const struct dmi_system_id *device_quirk_info;
+ uintptr_t quirks;
+
+ device_quirk_info = dmi_first_match(i8042_dmi_quirk_table);
+ if (!device_quirk_info)
+ return;
+
+ quirks = (uintptr_t)device_quirk_info->driver_data;
+
+ if (quirks & SERIO_QUIRK_NOKBD)
+ i8042_nokbd = true;
+ if (quirks & SERIO_QUIRK_NOAUX)
+ i8042_noaux = true;
+ if (quirks & SERIO_QUIRK_NOMUX)
+ i8042_nomux = true;
+ if (quirks & SERIO_QUIRK_FORCEMUX)
+ i8042_nomux = false;
+ if (quirks & SERIO_QUIRK_UNLOCK)
+ i8042_unlock = true;
+ if (quirks & SERIO_QUIRK_PROBE_DEFER)
+ i8042_probe_defer = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (quirks & SERIO_QUIRK_RESET_ALWAYS)
+ i8042_reset = I8042_RESET_ALWAYS;
+ if (quirks & SERIO_QUIRK_RESET_NEVER)
+ i8042_reset = I8042_RESET_NEVER;
+ }
+ if (quirks & SERIO_QUIRK_DIRECT)
+ i8042_direct = true;
+ if (quirks & SERIO_QUIRK_DUMBKBD)
+ i8042_dumbkbd = true;
+ if (quirks & SERIO_QUIRK_NOLOOP)
+ i8042_noloop = true;
+ if (quirks & SERIO_QUIRK_NOTIMEOUT)
+ i8042_notimeout = true;
+ if (quirks & SERIO_QUIRK_KBDRESET)
+ i8042_kbdreset = true;
+ if (quirks & SERIO_QUIRK_DRITEK)
+ i8042_dritek = true;
+#ifdef CONFIG_PNP
+ if (quirks & SERIO_QUIRK_NOPNP)
+ i8042_nopnp = true;
+#endif
+}
+#else
+static inline void i8042_check_quirks(void) {}
+#endif
+
static int __init i8042_platform_init(void)
{
int retval;
@@ -1297,45 +1612,42 @@ static int __init i8042_platform_init(void)
i8042_kbd_irq = I8042_MAP_IRQ(1);
i8042_aux_irq = I8042_MAP_IRQ(12);
- retval = i8042_pnp_init();
- if (retval)
- return retval;
-
#if defined(__ia64__)
- i8042_reset = I8042_RESET_ALWAYS;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
+ i8042_check_quirks();
+
+ pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ i8042_nokbd ? " nokbd" : "",
+ i8042_noaux ? " noaux" : "",
+ i8042_nomux ? " nomux" : "",
+ i8042_unlock ? " unlock" : "",
+ i8042_probe_defer ? " probe_defer" : "",
+ i8042_reset == I8042_RESET_DEFAULT ?
+ "" : i8042_reset == I8042_RESET_ALWAYS ?
+ " reset_always" : " reset_never",
+ i8042_direct ? " direct" : "",
+ i8042_dumbkbd ? " dumbkbd" : "",
+ i8042_noloop ? " noloop" : "",
+ i8042_notimeout ? " notimeout" : "",
+ i8042_kbdreset ? " kbdreset" : "",
#ifdef CONFIG_X86
- /* Honor module parameter when value is not default */
- if (i8042_reset == I8042_RESET_DEFAULT) {
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = I8042_RESET_ALWAYS;
-
- if (dmi_check_system(i8042_dmi_noselftest_table))
- i8042_reset = I8042_RESET_NEVER;
- }
-
- if (dmi_check_system(i8042_dmi_noloop_table))
- i8042_noloop = true;
-
- if (dmi_check_system(i8042_dmi_nomux_table))
- i8042_nomux = true;
-
- if (dmi_check_system(i8042_dmi_forcemux_table))
- i8042_nomux = false;
-
- if (dmi_check_system(i8042_dmi_notimeout_table))
- i8042_notimeout = true;
-
- if (dmi_check_system(i8042_dmi_dritek_table))
- i8042_dritek = true;
-
- if (dmi_check_system(i8042_dmi_kbdreset_table))
- i8042_kbdreset = true;
+ i8042_dritek ? " dritek" : "",
+#else
+ "",
+#endif
+#ifdef CONFIG_PNP
+ i8042_nopnp ? " nopnp" : "");
+#else
+ "");
+#endif
- if (dmi_check_system(i8042_dmi_probe_defer_table))
- i8042_probe_defer = true;
+ retval = i8042_pnp_init();
+ if (retval)
+ return retval;
+#ifdef CONFIG_X86
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
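
The merged table relies on first-match semantics: dmi_first_match(), used by i8042_check_quirks() above, returns the first entry whose DMI fields all match, so a device-specific entry placed before a vendor-wide one wins, as the table's ordering comment notes. A self-contained userspace sketch of that scheme; quirk_entry and first_match are simplified stand-ins for the kernel's dmi_system_id and dmi_first_match(), and the flag values only mirror the SERIO_QUIRK_* bits:

/*
 * Userspace sketch of the first-match quirk lookup. Nothing here is
 * kernel API; it just models how a device entry ahead of a
 * vendor-wide entry overrides it.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUIRK_NOMUX		(1u << 2)
#define QUIRK_RESET_NEVER	(1u << 7)

struct quirk_entry {
	const char *vendor;	/* NULL terminates the table */
	const char *product;	/* NULL matches any product */
	uint32_t flags;
};

/* Ordering matters: the first match wins, as in the merged table. */
static const struct quirk_entry table[] = {
	{ "ASUSTeK COMPUTER INC.", "X450LCP", QUIRK_NOMUX | QUIRK_RESET_NEVER },
	{ "ASUSTeK COMPUTER INC.", NULL,      QUIRK_RESET_NEVER },
	{ NULL, NULL, 0 }
};

static const struct quirk_entry *first_match(const char *vendor,
					     const char *product)
{
	const struct quirk_entry *e;

	for (e = table; e->vendor; e++) {
		if (strcmp(e->vendor, vendor))
			continue;
		if (e->product && strcmp(e->product, product))
			continue;
		return e;	/* first hit wins; later entries ignored */
	}
	return NULL;
}

int main(void)
{
	const struct quirk_entry *e = first_match("ASUSTeK COMPUTER INC.",
						  "X450LCP");

	if (e && (e->flags & QUIRK_NOMUX))
		puts("nomux");		/* device entry beat the vendor-wide one */
	if (e && (e->flags & QUIRK_RESET_NEVER))
		puts("reset_never");
	return 0;
}
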
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index bb2e1cbffba7..82beddb28761 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/ratelimit.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -47,6 +48,8 @@
#define M09_REGISTER_NUM_X 0x94
#define M09_REGISTER_NUM_Y 0x95
+#define M12_REGISTER_REPORT_RATE 0x88
+
#define EV_REGISTER_THRESHOLD 0x40
#define EV_REGISTER_GAIN 0x41
#define EV_REGISTER_OFFSET_Y 0x45
@@ -127,9 +130,12 @@ struct edt_ft5x06_ts_data {
int max_support_points;
char name[EDT_NAME_LEN];
+ char fw_version[EDT_NAME_LEN];
struct edt_reg_addr reg_addr;
enum edt_ver version;
+ unsigned int crc_errors;
+ unsigned int header_errors;
};
struct edt_i2c_chip_data {
@@ -178,6 +184,7 @@ static bool edt_ft5x06_ts_check_crc(struct edt_ft5x06_ts_data *tsdata,
crc ^= buf[i];
if (crc != buf[buflen-1]) {
+ tsdata->crc_errors++;
dev_err_ratelimited(&tsdata->client->dev,
"crc error: 0x%02x expected, got 0x%02x\n",
crc, buf[buflen-1]);
@@ -235,6 +242,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
if (tsdata->version == EDT_M06) {
if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
rdbuf[2] != datalen) {
+ tsdata->header_errors++;
dev_err_ratelimited(dev,
"Unexpected header: %02x%02x%02x!\n",
rdbuf[0], rdbuf[1], rdbuf[2]);
@@ -523,9 +531,55 @@ static EDT_ATTR(offset_y, S_IWUSR | S_IRUGO, NO_REGISTER, NO_REGISTER,
/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
M09_REGISTER_THRESHOLD, EV_REGISTER_THRESHOLD, 0, 255);
-/* m06: range 3 to 14, m12: (0x64: 100Hz) */
+/* m06: range 3 to 14, m12: range 1 to 255 */
static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
- NO_REGISTER, NO_REGISTER, 0, 255);
+ M12_REGISTER_REPORT_RATE, NO_REGISTER, 0, 255);
+
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->name);
+}
+
+static DEVICE_ATTR_RO(model);
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->fw_version);
+}
+
+static DEVICE_ATTR_RO(fw_version);
+
+/* m06 only */
+static ssize_t header_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->header_errors);
+}
+
+static DEVICE_ATTR_RO(header_errors);
+
+/* m06 only */
+static ssize_t crc_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->crc_errors);
+}
+
+static DEVICE_ATTR_RO(crc_errors);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
@@ -534,6 +588,10 @@ static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_offset_y.dattr.attr,
&edt_ft5x06_attr_threshold.dattr.attr,
&edt_ft5x06_attr_report_rate.dattr.attr,
+ &dev_attr_model.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_header_errors.attr,
+ &dev_attr_crc_errors.attr,
NULL
};
@@ -820,13 +878,13 @@ static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
#endif /* CONFIG_DEBUGFS */
static int edt_ft5x06_ts_identify(struct i2c_client *client,
- struct edt_ft5x06_ts_data *tsdata,
- char *fw_version)
+ struct edt_ft5x06_ts_data *tsdata)
{
u8 rdbuf[EDT_NAME_LEN];
char *p;
int error;
char *model_name = tsdata->name;
+ char *fw_version = tsdata->fw_version;
/* see what we find if we assume it is a M06 *
* if we get less than EDT_NAME_LEN, we don't want
@@ -1030,7 +1088,8 @@ static void edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
case EDT_M09:
case EDT_M12:
reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
- reg_addr->reg_report_rate = NO_REGISTER;
+ reg_addr->reg_report_rate = tsdata->version == EDT_M12 ?
+ M12_REGISTER_REPORT_RATE : NO_REGISTER;
reg_addr->reg_gain = M09_REGISTER_GAIN;
reg_addr->reg_offset = M09_REGISTER_OFFSET;
reg_addr->reg_offset_x = NO_REGISTER;
@@ -1081,7 +1140,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
struct input_dev *input;
unsigned long irq_flags;
int error;
- char fw_version[EDT_NAME_LEN];
+ u32 report_rate;
dev_dbg(&client->dev, "probing for EDT FT5x06 I2C\n");
@@ -1194,7 +1253,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->input = input;
tsdata->factory_mode = false;
- error = edt_ft5x06_ts_identify(client, tsdata, fw_version);
+ error = edt_ft5x06_ts_identify(client, tsdata);
if (error) {
dev_err(&client->dev, "touchscreen probe failed\n");
return error;
@@ -1210,9 +1269,30 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
edt_ft5x06_ts_get_parameters(tsdata);
+ if (tsdata->reg_addr.reg_report_rate != NO_REGISTER &&
+ !device_property_read_u32(&client->dev,
+ "report-rate-hz", &report_rate)) {
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate = clamp_val(report_rate, 30, 140);
+ else
+ tsdata->report_rate = clamp_val(report_rate, 1, 255);
+
+ if (report_rate != tsdata->report_rate)
+ dev_warn(&client->dev,
+ "report-rate %dHz is unsupported, use %dHz\n",
+ report_rate, tsdata->report_rate);
+
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate /= 10;
+
+ edt_ft5x06_register_write(tsdata,
+ tsdata->reg_addr.reg_report_rate,
+ tsdata->report_rate);
+ }
+
dev_dbg(&client->dev,
"Model \"%s\", Rev. \"%s\", %dx%d sensors\n",
- tsdata->name, fw_version, tsdata->num_x, tsdata->num_y);
+ tsdata->name, tsdata->fw_version, tsdata->num_x, tsdata->num_y);
input->name = tsdata->name;
input->id.bustype = BUS_I2C;
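
The probe hunk above clamps the optional "report-rate-hz" property to the model's supported range and, for M06, stores the rate divided by 10 (hence the register range 3-14 in the attribute comment). A small sketch of that conversion; clamp_u32 stands in for the kernel's clamp_val():

/*
 * Sketch of the report-rate clamping added in the probe path: M06
 * accepts 30-140 Hz stored as rate/10, newer models 1-255 stored raw.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static uint32_t rate_to_reg(int is_m06, uint32_t hz)
{
	uint32_t clamped = is_m06 ? clamp_u32(hz, 30, 140)
				  : clamp_u32(hz, 1, 255);

	if (clamped != hz)
		fprintf(stderr, "report-rate %uHz is unsupported, use %uHz\n",
			hz, clamped);

	/* M06 stores the rate divided by 10 (register range 3-14). */
	return is_m06 ? clamped / 10 : clamped;
}

int main(void)
{
	printf("M06 200Hz -> reg 0x%02x\n", rate_to_reg(1, 200)); /* 0x0e */
	printf("M12  60Hz -> reg 0x%02x\n", rate_to_reg(0, 60));  /* 0x3c */
	return 0;
}
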
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index cbe0dd412912..4b7eee01c6aa 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -220,6 +220,7 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
{
u8 buf[EXC3000_LEN_VENDOR_REQUEST] = { 0x67, 0x00, 0x42, 0x00, 0x03 };
int ret;
+ unsigned long time_left;
mutex_lock(&data->query_lock);
@@ -233,9 +234,9 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
goto out_unlock;
if (response) {
- ret = wait_for_completion_timeout(&data->wait_event,
- timeout * HZ);
- if (ret <= 0) {
+ time_left = wait_for_completion_timeout(&data->wait_event,
+ timeout * HZ);
+ if (time_left == 0) {
ret = -ETIMEDOUT;
goto out_unlock;
}
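
wait_for_completion_timeout() returns an unsigned long — 0 on timeout, otherwise the jiffies remaining — so funnelling the result through a signed int and testing <= 0 mixed types; the hunk keeps it in its own unsigned variable and maps only 0 to -ETIMEDOUT. A compact sketch of the corrected pattern, with fake_wait() as a stand-in for the completion API:

/*
 * Sketch of the timeout handling fixed above: the wait primitive
 * returns an unsigned long (0 = timed out, otherwise time left), so
 * the result must not pass through a signed int.
 */
#include <errno.h>
#include <stdio.h>

static unsigned long fake_wait(int completes, unsigned long timeout)
{
	return completes ? timeout / 2 : 0;	/* time left, or 0 */
}

static int request(int completes)
{
	unsigned long time_left;
	int ret = 0;

	time_left = fake_wait(completes, 1000);
	if (time_left == 0)	/* the only failure the API reports */
		ret = -ETIMEDOUT;

	return ret;
}

int main(void)
{
	printf("completed: %d\n", request(1));	/* 0 */
	printf("timed out: %d\n", request(0));	/* -ETIMEDOUT */
	return 0;
}
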
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index aa45a9fee6a0..d016505fc081 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -822,22 +822,16 @@ static int goodix_resource(struct acpi_resource *ares, void *data)
struct device *dev = &ts->client->dev;
struct acpi_resource_gpio *gpio;
- switch (ares->type) {
- case ACPI_RESOURCE_TYPE_GPIO:
- gpio = &ares->data.gpio;
- if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) {
- if (ts->gpio_int_idx == -1) {
- ts->gpio_int_idx = ts->gpio_count;
- } else {
- dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
- ts->gpio_int_idx = -2;
- }
+ if (acpi_gpio_get_irq_resource(ares, &gpio)) {
+ if (ts->gpio_int_idx == -1) {
+ ts->gpio_int_idx = ts->gpio_count;
+ } else {
+ dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
+ ts->gpio_int_idx = -2;
}
ts->gpio_count++;
- break;
- default:
- break;
- }
+ } else if (acpi_gpio_get_io_resource(ares, &gpio))
+ ts->gpio_count++;
return 0;
}
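
acpi_gpio_get_irq_resource() and acpi_gpio_get_io_resource() each return true (and fill the gpio pointer) only for a resource of the matching connection type, which is what lets the rewrite drop the open-coded switch. A standalone model of the resulting counting logic (stub predicates, illustrative names):

	#include <stdbool.h>
	#include <stdio.h>

	struct res_model { bool is_irq; bool is_io; };

	/* Stand-ins for acpi_gpio_get_irq_resource()/acpi_gpio_get_io_resource() */
	static bool is_irq_res(const struct res_model *r) { return r->is_irq; }
	static bool is_io_res(const struct res_model *r)  { return r->is_io; }

	int main(void)
	{
		struct res_model res[] = { { true, false }, { false, true } };
		int gpio_count = 0, gpio_int_idx = -1;

		for (int i = 0; i < 2; i++) {
			if (is_irq_res(&res[i])) {
				/* remember the first GpioInt, mark duplicates with -2 */
				gpio_int_idx = (gpio_int_idx == -1) ? gpio_count : -2;
				gpio_count++;
			} else if (is_io_res(&res[i])) {
				gpio_count++;
			}
		}
		printf("count=%d int_idx=%d\n", gpio_count, gpio_int_idx);
		return 0;
	}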
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 8bd03278ad9a..52f9e9eaab14 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -15,75 +15,75 @@
/* Register Map */
-#define BT541_SWRESET_CMD 0x0000
-#define BT541_WAKEUP_CMD 0x0001
+#define ZINITIX_SWRESET_CMD 0x0000
+#define ZINITIX_WAKEUP_CMD 0x0001
-#define BT541_IDLE_CMD 0x0004
-#define BT541_SLEEP_CMD 0x0005
+#define ZINITIX_IDLE_CMD 0x0004
+#define ZINITIX_SLEEP_CMD 0x0005
-#define BT541_CLEAR_INT_STATUS_CMD 0x0003
-#define BT541_CALIBRATE_CMD 0x0006
-#define BT541_SAVE_STATUS_CMD 0x0007
-#define BT541_SAVE_CALIBRATION_CMD 0x0008
-#define BT541_RECALL_FACTORY_CMD 0x000f
+#define ZINITIX_CLEAR_INT_STATUS_CMD 0x0003
+#define ZINITIX_CALIBRATE_CMD 0x0006
+#define ZINITIX_SAVE_STATUS_CMD 0x0007
+#define ZINITIX_SAVE_CALIBRATION_CMD 0x0008
+#define ZINITIX_RECALL_FACTORY_CMD 0x000f
-#define BT541_THRESHOLD 0x0020
+#define ZINITIX_THRESHOLD 0x0020
-#define BT541_LARGE_PALM_REJECT_AREA_TH 0x003F
+#define ZINITIX_LARGE_PALM_REJECT_AREA_TH 0x003F
-#define BT541_DEBUG_REG 0x0115 /* 0~7 */
+#define ZINITIX_DEBUG_REG 0x0115 /* 0~7 */
-#define BT541_TOUCH_MODE 0x0010
-#define BT541_CHIP_REVISION 0x0011
-#define BT541_FIRMWARE_VERSION 0x0012
+#define ZINITIX_TOUCH_MODE 0x0010
+#define ZINITIX_CHIP_REVISION 0x0011
+#define ZINITIX_FIRMWARE_VERSION 0x0012
#define ZINITIX_USB_DETECT 0x116
-#define BT541_MINOR_FW_VERSION 0x0121
+#define ZINITIX_MINOR_FW_VERSION 0x0121
-#define BT541_VENDOR_ID 0x001C
-#define BT541_HW_ID 0x0014
+#define ZINITIX_VENDOR_ID 0x001C
+#define ZINITIX_HW_ID 0x0014
-#define BT541_DATA_VERSION_REG 0x0013
-#define BT541_SUPPORTED_FINGER_NUM 0x0015
-#define BT541_EEPROM_INFO 0x0018
-#define BT541_INITIAL_TOUCH_MODE 0x0019
+#define ZINITIX_DATA_VERSION_REG 0x0013
+#define ZINITIX_SUPPORTED_FINGER_NUM 0x0015
+#define ZINITIX_EEPROM_INFO 0x0018
+#define ZINITIX_INITIAL_TOUCH_MODE 0x0019
-#define BT541_TOTAL_NUMBER_OF_X 0x0060
-#define BT541_TOTAL_NUMBER_OF_Y 0x0061
+#define ZINITIX_TOTAL_NUMBER_OF_X 0x0060
+#define ZINITIX_TOTAL_NUMBER_OF_Y 0x0061
-#define BT541_DELAY_RAW_FOR_HOST 0x007f
+#define ZINITIX_DELAY_RAW_FOR_HOST 0x007f
-#define BT541_BUTTON_SUPPORTED_NUM 0x00B0
-#define BT541_BUTTON_SENSITIVITY 0x00B2
-#define BT541_DUMMY_BUTTON_SENSITIVITY 0X00C8
+#define ZINITIX_BUTTON_SUPPORTED_NUM 0x00B0
+#define ZINITIX_BUTTON_SENSITIVITY 0x00B2
+#define ZINITIX_DUMMY_BUTTON_SENSITIVITY 0x00C8
-#define BT541_X_RESOLUTION 0x00C0
-#define BT541_Y_RESOLUTION 0x00C1
+#define ZINITIX_X_RESOLUTION 0x00C0
+#define ZINITIX_Y_RESOLUTION 0x00C1
-#define BT541_POINT_STATUS_REG 0x0080
-#define BT541_ICON_STATUS_REG 0x00AA
+#define ZINITIX_POINT_STATUS_REG 0x0080
+#define ZINITIX_ICON_STATUS_REG 0x00AA
-#define BT541_POINT_COORD_REG (BT541_POINT_STATUS_REG + 2)
+#define ZINITIX_POINT_COORD_REG (ZINITIX_POINT_STATUS_REG + 2)
-#define BT541_AFE_FREQUENCY 0x0100
-#define BT541_DND_N_COUNT 0x0122
-#define BT541_DND_U_COUNT 0x0135
+#define ZINITIX_AFE_FREQUENCY 0x0100
+#define ZINITIX_DND_N_COUNT 0x0122
+#define ZINITIX_DND_U_COUNT 0x0135
-#define BT541_RAWDATA_REG 0x0200
+#define ZINITIX_RAWDATA_REG 0x0200
-#define BT541_EEPROM_INFO_REG 0x0018
+#define ZINITIX_EEPROM_INFO_REG 0x0018
-#define BT541_INT_ENABLE_FLAG 0x00f0
-#define BT541_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
+#define ZINITIX_INT_ENABLE_FLAG 0x00f0
+#define ZINITIX_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
-#define BT541_BTN_WIDTH 0x016d
+#define ZINITIX_BTN_WIDTH 0x016d
-#define BT541_CHECKSUM_RESULT 0x012c
+#define ZINITIX_CHECKSUM_RESULT 0x012c
-#define BT541_INIT_FLASH 0x01d0
-#define BT541_WRITE_FLASH 0x01d1
-#define BT541_READ_FLASH 0x01d2
+#define ZINITIX_INIT_FLASH 0x01d0
+#define ZINITIX_WRITE_FLASH 0x01d1
+#define ZINITIX_READ_FLASH 0x01d2
#define ZINITIX_INTERNAL_FLAG_02 0x011e
#define ZINITIX_INTERNAL_FLAG_03 0x011f
@@ -196,13 +196,13 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
int i;
int error;
- error = zinitix_write_cmd(client, BT541_SWRESET_CMD);
+ error = zinitix_write_cmd(client, ZINITIX_SWRESET_CMD);
if (error) {
dev_err(&client->dev, "Failed to write reset command\n");
return error;
}
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG, 0x0);
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, 0x0);
if (error) {
dev_err(&client->dev,
"Failed to reset interrupt enable flag\n");
@@ -210,32 +210,32 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
}
/* initialize */
- error = zinitix_write_u16(client, BT541_X_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_X_RESOLUTION,
bt541->prop.max_x);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_Y_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_Y_RESOLUTION,
bt541->prop.max_y);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_SUPPORTED_FINGER_NUM,
+ error = zinitix_write_u16(client, ZINITIX_SUPPORTED_FINGER_NUM,
MAX_SUPPORTED_FINGER_NUM);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INITIAL_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_INITIAL_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG,
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG,
BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE |
BIT_UP);
if (error)
@@ -243,7 +243,7 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
/* clear queue */
for (i = 0; i < 10; i++) {
- zinitix_write_cmd(client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(client, ZINITIX_CLEAR_INT_STATUS_CMD);
udelay(10);
}
@@ -361,7 +361,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
memset(&touch_event, 0, sizeof(struct touch_event));
- error = zinitix_read_data(bt541->client, BT541_POINT_STATUS_REG,
+ error = zinitix_read_data(bt541->client, ZINITIX_POINT_STATUS_REG,
&touch_event, sizeof(struct touch_event));
if (error) {
dev_err(&client->dev, "Failed to read in touchpoint struct\n");
@@ -381,7 +381,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
input_sync(bt541->input_dev);
out:
- zinitix_write_cmd(bt541->client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(bt541->client, ZINITIX_CLEAR_INT_STATUS_CMD);
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c79a0df090c0..5c5cb5bee8b6 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -363,6 +363,16 @@ config ARM_SMMU_QCOM
When running on a Qualcomm platform that has the custom variant
of the ARM SMMU, this needs to be built into the SMMU driver.
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM
+ help
+ Support for implementation-specific debug features in ARM SMMU
+ hardware found in QTI platforms.
+
+ Say Y here to enable debugging of issues such as TLB sync timeouts,
+ which requires implementation-defined register dumps.
+
config ARM_SMMU_V3
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1ab31074f5b3..84e5bb1bf01b 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -13,12 +13,13 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);
+extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
@@ -114,10 +115,17 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
amd_iommu_domain_set_pt_root(domain, 0);
}
+static inline int get_pci_sbdf_id(struct pci_dev *pdev)
+{
+ int seg = pci_domain_nr(pdev->bus);
+ u16 devid = pci_dev_id(pdev);
+
+ return PCI_SEG_DEVID_TO_SBDF(seg, devid);
+}
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct device *dev);
-extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+extern int __init add_special_device(u8 type, u8 id, u32 *devid,
bool cmd_line);
#ifdef CONFIG_DMI
@@ -128,4 +136,10 @@ static inline void amd_iommu_apply_ivrs_quirks(void) { }
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
+extern struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
+
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
+extern bool amd_iommu_snp_en;
#endif
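
get_pci_sbdf_id() above packs the PCI segment into the upper 16 bits of a 32-bit ID and the classic 16-bit bus/dev/fn ID into the lower 16, via the PCI_SEG_DEVID_TO_SBDF() macro defined in amd_iommu_types.h below. A self-contained sketch of the round trip:

	#include <stdint.h>
	#include <stdio.h>

	/* Same packing as PCI_SEG_DEVID_TO_SBDF()/PCI_SBDF_TO_*(): a 32-bit
	 * ID with the PCI segment in bits 31:16 and the 16-bit devid in
	 * bits 15:0.
	 */
	#define SEG_DEVID_TO_SBDF(seg, devid) \
		((((uint32_t)(seg) & 0xffff) << 16) | ((devid) & 0xffff))
	#define SBDF_TO_SEGID(sbdf)	(((sbdf) >> 16) & 0xffff)
	#define SBDF_TO_DEVID(sbdf)	((sbdf) & 0xffff)

	int main(void)
	{
		/* segment 1, bus 0x3a, slot 0x00, fn 1 -> devid 0x3a01 */
		uint32_t sbdf = SEG_DEVID_TO_SBDF(1, 0x3a01);

		printf("sbdf=%#x seg=%#x devid=%#x\n",
		       sbdf, SBDF_TO_SEGID(sbdf), SBDF_TO_DEVID(sbdf));
		return 0;
	}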
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 72d0f5e2f651..5b1019dab328 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -67,6 +67,7 @@
#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
+#define MMIO_EXT_FEATURES2 0x01A0
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -102,6 +103,12 @@
#define FEATURE_GLXVAL_SHIFT 14
#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+/* Extended Feature 2 Bits */
+#define FEATURE_SNPAVICSUP_SHIFT 5
+#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
+#define FEATURE_SNPAVICSUP_GAM(x) \
+ ((x & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT == 0x1)
+
/* Note:
* The current driver only supports 16-bit PASIDs.
* Currently, the hardware only implements up to 16-bit PASIDs
@@ -143,27 +150,28 @@
#define EVENT_FLAG_I 0x008
/* feature control bits */
-#define CONTROL_IOMMU_EN 0x00ULL
-#define CONTROL_HT_TUN_EN 0x01ULL
-#define CONTROL_EVT_LOG_EN 0x02ULL
-#define CONTROL_EVT_INT_EN 0x03ULL
-#define CONTROL_COMWAIT_EN 0x04ULL
-#define CONTROL_INV_TIMEOUT 0x05ULL
-#define CONTROL_PASSPW_EN 0x08ULL
-#define CONTROL_RESPASSPW_EN 0x09ULL
-#define CONTROL_COHERENT_EN 0x0aULL
-#define CONTROL_ISOC_EN 0x0bULL
-#define CONTROL_CMDBUF_EN 0x0cULL
-#define CONTROL_PPRLOG_EN 0x0dULL
-#define CONTROL_PPRINT_EN 0x0eULL
-#define CONTROL_PPR_EN 0x0fULL
-#define CONTROL_GT_EN 0x10ULL
-#define CONTROL_GA_EN 0x11ULL
-#define CONTROL_GAM_EN 0x19ULL
-#define CONTROL_GALOG_EN 0x1CULL
-#define CONTROL_GAINT_EN 0x1DULL
-#define CONTROL_XT_EN 0x32ULL
-#define CONTROL_INTCAPXT_EN 0x33ULL
+#define CONTROL_IOMMU_EN 0
+#define CONTROL_HT_TUN_EN 1
+#define CONTROL_EVT_LOG_EN 2
+#define CONTROL_EVT_INT_EN 3
+#define CONTROL_COMWAIT_EN 4
+#define CONTROL_INV_TIMEOUT 5
+#define CONTROL_PASSPW_EN 8
+#define CONTROL_RESPASSPW_EN 9
+#define CONTROL_COHERENT_EN 10
+#define CONTROL_ISOC_EN 11
+#define CONTROL_CMDBUF_EN 12
+#define CONTROL_PPRLOG_EN 13
+#define CONTROL_PPRINT_EN 14
+#define CONTROL_PPR_EN 15
+#define CONTROL_GT_EN 16
+#define CONTROL_GA_EN 17
+#define CONTROL_GAM_EN 25
+#define CONTROL_GALOG_EN 28
+#define CONTROL_GAINT_EN 29
+#define CONTROL_XT_EN 50
+#define CONTROL_INTCAPXT_EN 51
+#define CONTROL_SNPAVIC_EN 61
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -445,8 +453,6 @@ struct irq_remap_table {
u32 *table;
};
-extern struct irq_remap_table **irq_lookup_table;
-
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
@@ -456,6 +462,16 @@ extern bool amdr_ivrs_remap_support;
/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;
+#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
+#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
+#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
+ ((devid) & 0xffff))
+
+/* Make iterating over all PCI segments easier */
+#define for_each_pci_segment(pci_seg) \
+ list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
+#define for_each_pci_segment_safe(pci_seg, next) \
+ list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
/*
* Make iterating over all IOMMUs easier
*/
@@ -478,13 +494,14 @@ extern struct kmem_cache *amd_iommu_irq_cache;
struct amd_iommu_fault {
u64 address; /* IO virtual address of the fault*/
u32 pasid; /* Address space identifier */
- u16 device_id; /* Originating PCI device id */
+ u32 sbdf; /* Originating PCI device id */
u16 tag; /* PPR tag */
u16 flags; /* Fault flags */
};
+struct amd_iommu;
struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;
@@ -531,6 +548,75 @@ struct protection_domain {
};
/*
+ * This structure contains information about one PCI segment in the system.
+ */
+struct amd_iommu_pci_seg {
+ /* List with all PCI segments in the system */
+ struct list_head list;
+
+ /* List of all available dev_data structures */
+ struct llist_head dev_data_list;
+
+ /* PCI segment number */
+ u16 id;
+
+ /* Largest PCI device id we expect translation requests for */
+ u16 last_bdf;
+
+ /* Size of the device table */
+ u32 dev_table_size;
+
+ /* Size of the alias table */
+ u32 alias_table_size;
+
+ /* Size of the rlookup table */
+ u32 rlookup_table_size;
+
+ /*
+ * device table virtual address
+ *
+ * Pointer to the per PCI segment device table.
+ * It is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
+ struct dev_table_entry *dev_table;
+
+ /*
+ * The rlookup table is used to find the IOMMU which is
+ * responsible for a specific device. It is indexed by the PCI
+ * device id.
+ */
+ struct amd_iommu **rlookup_table;
+
+ /*
+ * This table is used to find the irq remapping table for a given
+ * device id quickly.
+ */
+ struct irq_remap_table **irq_lookup_table;
+
+ /*
+ * Pointer to a device table to which the content of the old
+ * device table will be copied. It is only used in the kdump kernel.
+ */
+ struct dev_table_entry *old_dev_tbl_cpy;
+
+ /*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
+ u16 *alias_table;
+
+ /*
+ * A list of required unity mappings we find in ACPI. It is not locked
+ * because at runtime it is only read. It is created at ACPI table
+ * parsing time.
+ */
+ struct list_head unity_map;
+};
+
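
The struct above replaces the old global device/alias/rlookup tables with per-segment ones reached through iommu->pci_seg. A minimal standalone model of that indirection (illustrative names; the real accessor is the get_dev_table() declared in amd_iommu.h):

	/* Standalone model of the per-segment indirection: per-device tables
	 * hang off the segment instead of off global arrays.
	 */
	struct dte_model { unsigned long long data[4]; };

	struct seg_model {
		unsigned short id;
		struct dte_model *dev_table;
	};

	struct iommu_model {
		struct seg_model *pci_seg;
	};

	static inline struct dte_model *model_get_dev_table(struct iommu_model *iommu)
	{
		return iommu->pci_seg->dev_table;	/* was: global amd_iommu_dev_table */
	}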
+/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
*/
@@ -567,6 +653,9 @@ struct amd_iommu {
/* Extended features */
u64 features;
+ /* Extended features 2 */
+ u64 features2;
+
/* IOMMUv2 */
bool is_iommu_v2;
@@ -581,7 +670,7 @@ struct amd_iommu {
u16 cap_ptr;
/* pci domain of this IOMMU */
- u16 pci_seg;
+ struct amd_iommu_pci_seg *pci_seg;
/* start of exclusion range of that IOMMU */
u64 exclusion_start;
@@ -666,8 +755,8 @@ struct acpihid_map_entry {
struct list_head list;
u8 uid[ACPIHID_UID_LEN];
u8 hid[ACPIHID_HID_LEN];
- u16 devid;
- u16 root_devid;
+ u32 devid;
+ u32 root_devid;
bool cmd_line;
struct iommu_group *group;
};
@@ -675,7 +764,7 @@ struct acpihid_map_entry {
struct devid_map {
struct list_head list;
u8 id;
- u16 devid;
+ u32 devid;
bool cmd_line;
};
@@ -689,7 +778,7 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
- struct pci_dev *pdev;
+ struct device *dev;
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
struct {
@@ -710,6 +799,12 @@ extern struct list_head hpet_map;
extern struct list_head acpihid_map;
/*
+ * List with all PCI segments in the system. This list is not locked because
+ * it is only written at driver initialization time
+ */
+extern struct list_head amd_iommu_pci_seg_list;
+
+/*
* List with all IOMMUs in the system. This list is not locked because it is
* only written and read at driver initialization or suspend time
*/
@@ -749,38 +844,12 @@ struct unity_map_entry {
};
/*
- * List of all unity mappings. It is not locked because as runtime it is only
- * read. It is created at ACPI table parsing time.
- */
-extern struct list_head amd_iommu_unity_map;
-
-/*
* Data structures for device handling
*/
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to find requestor ids to device ids. Not locked because only
- * read on runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
-/*
- * Reverse lookup table to find the IOMMU which translates a specific device.
- */
-extern struct amd_iommu **amd_iommu_rlookup_table;
-
/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -913,6 +982,7 @@ struct irq_2_irte {
struct amd_ir_data {
u32 cached_ga_tag;
+ struct amd_iommu *iommu;
struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry;
void *entry; /* Pointer to union irte or struct irte_ga */
@@ -930,9 +1000,9 @@ struct amd_ir_data {
struct amd_irte_ops {
void (*prepare)(void *, u32, bool, u8, u32, int);
- void (*activate)(void *, u16, u16);
- void (*deactivate)(void *, u16, u16);
- void (*set_affinity)(void *, u16, u16, u8, u32);
+ void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
void *(*get)(struct irq_remap_table *, int);
void (*set_allocated)(struct irq_remap_table *, int);
bool (*is_allocated)(struct irq_remap_table *, int);
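
The CONTROL_* constants are now plain bit positions rather than pre-shifted masks, so the shifting presumably happens in the enable/disable helpers at the point of use. A standalone sketch of that convention (the helper here is a model, not the kernel's iommu_feature_enable()):

	#include <stdint.h>
	#include <stdio.h>

	#define CONTROL_GA_EN		17
	#define CONTROL_GALOG_EN	28

	/* Model of a feature-enable helper: the bit position is shifted at
	 * the point of use, matching the plain-position CONTROL_* values.
	 */
	static uint64_t feature_enable(uint64_t ctrl, int bit)
	{
		return ctrl | (1ULL << bit);
	}

	int main(void)
	{
		uint64_t ctrl = feature_enable(0, CONTROL_GA_EN);

		ctrl = feature_enable(ctrl, CONTROL_GALOG_EN);
		printf("ctrl=%#llx\n", (unsigned long long)ctrl);  /* 0x10020000 */
		return 0;
	}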
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1d08f87e734b..fdc642362c14 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -84,6 +84,10 @@
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define LOOP_TIMEOUT 2000000
+
+#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
+ | ((dev & 0x1f) << 3) | (fn & 0x7))
+
/*
* ACPI table definitions
*
@@ -110,7 +114,7 @@ struct ivhd_header {
/* Following only valid on IVHD type 11h and 40h */
u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
- u64 res;
+ u64 efr_reg2;
} __attribute__((packed));
/*
@@ -141,7 +145,8 @@ struct ivmd_header {
u16 length;
u16 devid;
u16 aux;
- u64 resv;
+ u16 pci_seg;
+ u8 resv[6];
u64 range_start;
u64 range_length;
} __attribute__((packed));
@@ -159,11 +164,15 @@ static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static int amd_iommu_target_ivhd_type;
-u16 amd_iommu_last_bdf; /* largest PCI device id we have
- to handle */
-LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
- we find in ACPI */
+/* Global EFR and EFR2 registers */
+u64 amd_iommu_efr;
+u64 amd_iommu_efr2;
+/* Whether SNP is enabled on the system */
+bool amd_iommu_snp_en;
+EXPORT_SYMBOL(amd_iommu_snp_en);
+
+LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -186,47 +195,11 @@ bool amdr_ivrs_remap_support __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
/*
- * Pointer to the device table which is shared by all AMD IOMMUs
- * it is indexed by the PCI device id or the HT unit id and contains
- * information about the domain the device belongs to as well as the
- * page table root pointer.
- */
-struct dev_table_entry *amd_iommu_dev_table;
-/*
- * Pointer to a device table which the content of old device table
- * will be copied to. It's only be used in kdump kernel.
- */
-static struct dev_table_entry *old_dev_tbl_cpy;
-
-/*
- * The alias table is a driver specific data structure which contains the
- * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
- * More than one device can share the same requestor id.
- */
-u16 *amd_iommu_alias_table;
-
-/*
- * The rlookup table is used to find the IOMMU which is responsible
- * for a specific device. It is also indexed by the PCI device id.
- */
-struct amd_iommu **amd_iommu_rlookup_table;
-
-/*
- * This table is used to find the irq remapping table for a given device id
- * quickly.
- */
-struct irq_remap_table **irq_lookup_table;
-
-/*
* AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
unsigned long *amd_iommu_pd_alloc_bitmap;
-static u32 dev_table_size; /* size of the device table */
-static u32 alias_table_size; /* size of the alias table */
-static u32 rlookup_table_size; /* size if the rlookup table */
-
enum iommu_init_state {
IOMMU_START_STATE,
IOMMU_IVRS_DETECTED,
@@ -256,7 +229,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
-static void init_device_table_dma(void);
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
static bool amd_iommu_pre_enabled = true;
@@ -281,16 +254,10 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline void update_last_devid(u16 devid)
-{
- if (devid > amd_iommu_last_bdf)
- amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
+static inline unsigned long tbl_size(int entry_size, int last_bdf)
{
unsigned shift = PAGE_SHIFT +
- get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+ get_order((last_bdf + 1) * entry_size);
return 1UL << shift;
}
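
tbl_size() rounds a table up to a power-of-two number of pages; with the per-segment rework it now takes last_bdf as a parameter instead of reading the old global. A worked standalone model (get_order() re-implemented here for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT 12

	/* Model of get_order(n): smallest order o such that
	 * (1 << (o + PAGE_SHIFT)) bytes cover n bytes.
	 */
	static int get_order_model(unsigned long size)
	{
		int order = 0;

		while ((1UL << (order + PAGE_SHIFT)) < size)
			order++;
		return order;
	}

	static unsigned long tbl_size_model(int entry_size, int last_bdf)
	{
		return 1UL << (PAGE_SHIFT +
			       get_order_model((last_bdf + 1UL) * entry_size));
	}

	int main(void)
	{
		/* 32-byte device-table entries, full 16-bit BDF space -> 2 MiB */
		printf("%lu\n", tbl_size_model(32, 0xffff));
		return 0;
	}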
@@ -300,21 +267,46 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
-#ifdef CONFIG_IRQ_REMAP
-static bool check_feature_on_all_iommus(u64 mask)
+/*
+ * Iterate through all the IOMMUs to compute the common EFR
+ * masks among all IOMMUs, and warn if an inconsistency is found.
+ */
+static void get_global_efr(void)
{
- bool ret = false;
struct amd_iommu *iommu;
for_each_iommu(iommu) {
- ret = iommu_feature(iommu, mask);
- if (!ret)
- return false;
+ u64 tmp = iommu->features;
+ u64 tmp2 = iommu->features2;
+
+ if (list_is_first(&iommu->list, &amd_iommu_list)) {
+ amd_iommu_efr = tmp;
+ amd_iommu_efr2 = tmp2;
+ continue;
+ }
+
+ if (amd_iommu_efr == tmp &&
+ amd_iommu_efr2 == tmp2)
+ continue;
+
+ pr_err(FW_BUG
+ "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
+ tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
+ iommu->index, iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
+ PCI_FUNC(iommu->devid));
+
+ amd_iommu_efr &= tmp;
+ amd_iommu_efr2 &= tmp2;
}
- return true;
+ pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
+}
+
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ return !!(amd_iommu_efr & mask);
}
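
get_global_efr() effectively reduces the per-IOMMU EFR/EFR2 words to their common subset (the first IOMMU seeds the value, mismatching ones are AND-ed in), so check_feature_on_all_iommus() collapses to a single mask test. A minimal model of that reduction:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Model: AND all per-IOMMU feature words, then test the feature
	 * mask against the intersection.
	 */
	int main(void)
	{
		uint64_t efrs[] = { 0x00ffull, 0x0ff0ull };	/* two IOMMUs */
		uint64_t global = ~0ull;

		for (unsigned int i = 0; i < 2; i++)
			global &= efrs[i];

		bool has_feat = global & 0x0010ull;	/* set in both -> true */

		printf("global=%#llx feat=%d\n",
		       (unsigned long long)global, has_feat);
		return 0;
	}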
-#endif
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
@@ -324,8 +316,10 @@ static bool check_feature_on_all_iommus(u64 mask)
static void __init early_iommu_features_init(struct amd_iommu *iommu,
struct ivhd_header *h)
{
- if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
iommu->features = h->efr_reg;
+ iommu->features2 = h->efr_reg2;
+ }
if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
amdr_ivrs_remap_support = true;
}
@@ -399,7 +393,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
u64 entry = start & PM_ADDR_MASK;
- if (!iommu_feature(iommu, FEATURE_SNP))
+ if (!check_feature_on_all_iommus(FEATURE_SNP))
return;
/* Note:
@@ -421,10 +415,12 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
+ u32 dev_table_size = iommu->pci_seg->dev_table_size;
+ void *dev_table = (void *)get_dev_table(iommu);
BUG_ON(iommu->mmio_base == NULL);
- entry = iommu_virt_to_phys(amd_iommu_dev_table);
+ entry = iommu_virt_to_phys(dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
&entry, sizeof(entry));
@@ -557,6 +553,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
u8 *p = (void *)h, *end = (void *)h;
struct ivhd_entry *dev;
+ int last_devid = -EINVAL;
u32 ivhd_size = get_ivhd_header_size(h);
@@ -573,14 +570,14 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
switch (dev->type) {
case IVHD_DEV_ALL:
/* Use maximum BDF value for DEV_ALL */
- update_last_devid(0xffff);
- break;
+ return 0xffff;
case IVHD_DEV_SELECT:
case IVHD_DEV_RANGE_END:
case IVHD_DEV_ALIAS:
case IVHD_DEV_EXT_SELECT:
/* all the above subfield types refer to device ids */
- update_last_devid(dev->devid);
+ if (dev->devid > last_devid)
+ last_devid = dev->devid;
break;
default:
break;
@@ -590,7 +587,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
WARN_ON(p != end);
- return 0;
+ return last_devid;
}
static int __init check_ivrs_checksum(struct acpi_table_header *table)
@@ -614,38 +611,125 @@ static int __init check_ivrs_checksum(struct acpi_table_header *table)
* id which we need to handle. This is the first of three functions which parse
* the ACPI table. So we check the checksum here.
*/
-static int __init find_last_devid_acpi(struct acpi_table_header *table)
+static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
{
u8 *p = (u8 *)table, *end = (u8 *)table;
struct ivhd_header *h;
+ int last_devid, last_bdf = 0;
p += IVRS_HEADER_LENGTH;
end += table->length;
while (p < end) {
h = (struct ivhd_header *)p;
- if (h->type == amd_iommu_target_ivhd_type) {
- int ret = find_last_devid_from_ivhd(h);
-
- if (ret)
- return ret;
+ if (h->pci_seg == pci_seg &&
+ h->type == amd_iommu_target_ivhd_type) {
+ last_devid = find_last_devid_from_ivhd(h);
+
+ if (last_devid < 0)
+ return -EINVAL;
+ if (last_devid > last_bdf)
+ last_bdf = last_devid;
}
p += h->length;
}
WARN_ON(p != end);
- return 0;
+ return last_bdf;
}
/****************************************************************************
*
* The following functions belong to the code path which parses the ACPI table
* the second time. In this ACPI parsing iteration we allocate IOMMU specific
- * data structures, initialize the device/alias/rlookup table and also
- * basically initialize the hardware.
+ * data structures, initialize the per PCI segment device/alias/rlookup table
+ * and also basically initialize the hardware.
*
****************************************************************************/
+/* Allocate per PCI segment device table */
+static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
+ get_order(pci_seg->dev_table_size));
+ if (!pci_seg->dev_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = NULL;
+}
+
+/* Allocate per PCI segment IOMMU rlookup table. */
+static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->rlookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ if (pci_seg->rlookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->rlookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = NULL;
+}
+
+static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->irq_lookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ kmemleak_alloc(pci_seg->irq_lookup_table,
+ pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ if (pci_seg->irq_lookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ kmemleak_free(pci_seg->irq_lookup_table);
+ free_pages((unsigned long)pci_seg->irq_lookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->irq_lookup_table = NULL;
+}
+
+static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ int i;
+
+ pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(pci_seg->alias_table_size));
+ if (!pci_seg->alias_table)
+ return -ENOMEM;
+
+ /*
+ * let all alias entries point to themselves
+ */
+ for (i = 0; i <= pci_seg->last_bdf; ++i)
+ pci_seg->alias_table[i] = i;
+
+ return 0;
+}
+
+static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->alias_table,
+ get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = NULL;
+}
+
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -724,7 +808,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
void *buf = (void *)__get_free_pages(gfp, order);
if (buf &&
- iommu_feature(iommu, FEATURE_SNP) &&
+ check_feature_on_all_iommus(FEATURE_SNP) &&
set_memory_4k((unsigned long)buf, (1 << order))) {
free_pages((unsigned long)buf, order);
buf = NULL;
@@ -815,20 +899,15 @@ static void free_ga_log(struct amd_iommu *iommu)
#endif
}
+#ifdef CONFIG_IRQ_REMAP
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
u32 status, i;
u64 entry;
if (!iommu->ga_log)
return -EINVAL;
- /* Check if already running */
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
- return 0;
-
entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
@@ -852,13 +931,12 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
if (WARN_ON(i >= LOOP_TIMEOUT))
return -EINVAL;
-#endif /* CONFIG_IRQ_REMAP */
+
return 0;
}
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
@@ -876,10 +954,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
err_out:
free_ga_log(iommu);
return -EINVAL;
-#else
- return 0;
-#endif /* CONFIG_IRQ_REMAP */
}
+#endif /* CONFIG_IRQ_REMAP */
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
@@ -916,56 +992,59 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
}
/* sets a specific bit in the device table entry. */
-static void set_dev_entry_bit(u16 devid, u8 bit)
+static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
+ dev_table[devid].data[i] |= (1UL << _bit);
+}
+
+static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ return __set_dev_entry_bit(dev_table, devid, bit);
}
-static int get_dev_entry_bit(u16 devid, u8 bit)
+static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
+ return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
+static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
-static bool copy_device_table(void)
+ return __get_dev_entry_bit(dev_table, devid, bit);
+}
+
+static bool __copy_device_table(struct amd_iommu *iommu)
{
- u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
+ u64 int_ctl, int_tab_len, entry = 0;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
struct dev_table_entry *old_devtb = NULL;
u32 lo, hi, devid, old_devtb_size;
phys_addr_t old_devtb_phys;
- struct amd_iommu *iommu;
u16 dom_id, dte_v, irq_v;
gfp_t gfp_flag;
u64 tmp;
- if (!amd_iommu_pre_enabled)
- return false;
-
- pr_warn("Translation is already enabled - trying to copy translation structures\n");
- for_each_iommu(iommu) {
- /* All IOMMUs should use the same device table with the same size */
- lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
- hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
- entry = (((u64) hi) << 32) + lo;
- if (last_entry && last_entry != entry) {
- pr_err("IOMMU:%d should use the same dev table as others!\n",
- iommu->index);
- return false;
- }
- last_entry = entry;
+ /* Each IOMMU uses a separate device table with the same size */
+ lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+ hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
+ entry = (((u64) hi) << 32) + lo;
- old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
- if (old_devtb_size != dev_table_size) {
- pr_err("The device table size of IOMMU:%d is not expected!\n",
- iommu->index);
- return false;
- }
+ old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
+ if (old_devtb_size != pci_seg->dev_table_size) {
+ pr_err("The device table size of IOMMU:%d is not expected!\n",
+ iommu->index);
+ return false;
}
/*
@@ -981,38 +1060,38 @@ static bool copy_device_table(void)
}
old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
- dev_table_size)
- : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+ pci_seg->dev_table_size)
+ : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
if (!old_devtb)
return false;
gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
- old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
- get_order(dev_table_size));
- if (old_dev_tbl_cpy == NULL) {
+ pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
+ get_order(pci_seg->dev_table_size));
+ if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
return false;
}
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- old_dev_tbl_cpy[devid] = old_devtb[devid];
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
if (dte_v && dom_id) {
- old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
- old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
+ pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
+ pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
/* If gcr3 table existed, mask it out */
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
- old_dev_tbl_cpy[devid].data[1] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
tmp |= DTE_FLAG_GV;
- old_dev_tbl_cpy[devid].data[0] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
}
}
@@ -1027,7 +1106,7 @@ static bool copy_device_table(void)
return false;
}
- old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
+ pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
}
}
memunmap(old_devtb);
@@ -1035,21 +1114,42 @@ static bool copy_device_table(void)
return true;
}
-void amd_iommu_apply_erratum_63(u16 devid)
+static bool copy_device_table(void)
{
- int sysmgt;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
- sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+ if (!amd_iommu_pre_enabled)
+ return false;
- if (sysmgt == 0x01)
- set_dev_entry_bit(devid, DEV_ENTRY_IW);
+ pr_warn("Translation is already enabled - trying to copy translation structures\n");
+
+ /*
+ * All IOMMUs within a PCI segment share a common device table.
+ * Hence copy the device table only once per PCI segment.
+ */
+ for_each_pci_segment(pci_seg) {
+ for_each_iommu(iommu) {
+ if (pci_seg->id != iommu->pci_seg->id)
+ continue;
+ if (!__copy_device_table(iommu))
+ return false;
+ break;
+ }
+ }
+
+ return true;
}
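
The rewritten copy_device_table() walks segments in the outer loop and breaks after the first IOMMU of each segment, so the shared per-segment table is copied exactly once. A standalone model of that iteration pattern (illustrative types):

	#include <stdio.h>

	struct seg_id  { int id; };
	struct unit_id { int seg_id; };

	int main(void)
	{
		struct seg_id  segs[]  = { { 0 }, { 1 } };
		struct unit_id units[] = { { 0 }, { 0 }, { 1 } };

		for (unsigned int s = 0; s < 2; s++) {
			for (unsigned int u = 0; u < 3; u++) {
				if (units[u].seg_id != segs[s].id)
					continue;
				printf("copy table for segment %d via iommu %u\n",
				       segs[s].id, u);
				break;	/* only once per segment */
			}
		}
		return 0;
	}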
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
{
- amd_iommu_rlookup_table[devid] = iommu;
+ int sysmgt;
+
+ sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
+ (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+
+ if (sysmgt == 0x01)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
}
/*
@@ -1060,26 +1160,26 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
u16 devid, u32 flags, u32 ext_flags)
{
if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
- set_iommu_for_device(iommu, devid);
+ amd_iommu_set_rlookup_table(iommu, devid);
}
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
@@ -1116,7 +1216,7 @@ int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
return 0;
}
-static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
bool cmd_line)
{
struct acpihid_map_entry *entry;
@@ -1195,10 +1295,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
{
u8 *p = (u8 *)h;
u8 *end = p, flags = 0;
- u16 devid = 0, devid_start = 0, devid_to = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u32 ivhd_size;
int ret;
@@ -1230,19 +1331,21 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
while (p < end) {
e = (struct ivhd_entry *)p;
+ seg_id = pci_seg->id;
+
switch (e->type) {
case IVHD_DEV_ALL:
DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
- for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+ for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
break;
case IVHD_DEV_SELECT:
- DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1253,8 +1356,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SELECT_RANGE_START:
DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %02x:%02x.%x flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1266,9 +1369,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS:
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
@@ -1280,18 +1383,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
devid_to = e->ext >> 8;
set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
- amd_iommu_alias_table[devid] = devid_to;
+ pci_seg->alias_table[devid] = devid_to;
break;
case IVHD_DEV_ALIAS_RANGE:
DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %02x:%02x.%x flags: %02x "
- "devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x "
+ "devid_to: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS_NUM(e->ext >> 8),
+ seg_id, PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -1303,9 +1406,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT:
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1317,8 +1420,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_EXT_SELECT_RANGE:
DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%02x:%02x.%x flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1330,15 +1433,15 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_RANGE_END:
- DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
if (alias) {
- amd_iommu_alias_table[dev_i] = devid_to;
+ pci_seg->alias_table[dev_i] = devid_to;
set_dev_entry_from_acpi(iommu,
devid_to, flags, ext_flags);
}
@@ -1349,11 +1452,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SPECIAL: {
u8 handle, type;
const char *var;
- u16 devid;
+ u32 devid;
int ret;
handle = e->ext & 0xff;
- devid = (e->ext >> 8) & 0xffff;
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
type = (e->ext >> 24) & 0xff;
if (type == IVHD_SPECIAL_IOAPIC)
@@ -1363,9 +1466,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
else
var = "UNKNOWN";
- DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
var, (int)handle,
- PCI_BUS_NUM(devid),
+ seg_id, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1383,7 +1486,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
case IVHD_DEV_ACPI_HID: {
- u16 devid;
+ u32 devid;
u8 hid[ACPIHID_HID_LEN];
u8 uid[ACPIHID_UID_LEN];
int ret;
@@ -1426,9 +1529,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
- devid = e->devid;
- DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
- hid, uid,
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ hid, uid, seg_id,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1458,6 +1561,74 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
return 0;
}
+/* Allocate PCI segment data structure */
+static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ int last_bdf;
+
+ /*
+ * First parse ACPI tables to find the largest Bus/Dev/Func we need to
+ * handle in this PCI segment. Based on this information, the shared data
+ * structures for the PCI segments in the system are allocated.
+ */
+ last_bdf = find_last_devid_acpi(ivrs_base, id);
+ if (last_bdf < 0)
+ return NULL;
+
+ pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
+ if (pci_seg == NULL)
+ return NULL;
+
+ pci_seg->last_bdf = last_bdf;
+ DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
+ pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+
+ pci_seg->id = id;
+ init_llist_head(&pci_seg->dev_data_list);
+ INIT_LIST_HEAD(&pci_seg->unity_map);
+ list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
+
+ if (alloc_dev_table(pci_seg))
+ return NULL;
+ if (alloc_alias_table(pci_seg))
+ return NULL;
+ if (alloc_rlookup_table(pci_seg))
+ return NULL;
+
+ return pci_seg;
+}
+
+static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == id)
+ return pci_seg;
+ }
+
+ return alloc_pci_segment(id, ivrs_base);
+}
+
+static void __init free_pci_segments(void)
+{
+ struct amd_iommu_pci_seg *pci_seg, *next;
+
+ for_each_pci_segment_safe(pci_seg, next) {
+ list_del(&pci_seg->list);
+ free_irq_lookup_table(pci_seg);
+ free_rlookup_table(pci_seg);
+ free_alias_table(pci_seg);
+ free_dev_table(pci_seg);
+ kfree(pci_seg);
+ }
+}
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_cwwb_sem(iommu);
@@ -1542,9 +1713,15 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
*/
-static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
+ struct acpi_table_header *ivrs_base)
{
- int ret;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+ iommu->pci_seg = pci_seg;
raw_spin_lock_init(&iommu->lock);
iommu->cmd_sem_val = 0;
@@ -1566,7 +1743,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
*/
iommu->devid = h->devid;
iommu->cap_ptr = h->cap_ptr;
- iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
switch (h->type) {
@@ -1621,6 +1797,13 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
+ return init_iommu_from_acpi(iommu, h);
+}
+
+static int __init init_iommu_one_late(struct amd_iommu *iommu)
+{
+ int ret;
+
if (alloc_cwwb_sem(iommu))
return -ENOMEM;
@@ -1642,10 +1825,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (amd_iommu_pre_enabled)
amd_iommu_pre_enabled = translation_pre_enabled(iommu);
- ret = init_iommu_from_acpi(iommu, h);
- if (ret)
- return ret;
-
if (amd_iommu_irq_remap) {
ret = amd_iommu_create_irq_domain(iommu);
if (ret)
@@ -1656,7 +1835,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
* Make sure IOMMU is not considered to translate itself. The IVRS
* table tells us so, but this is a lie!
*/
- amd_iommu_rlookup_table[iommu->devid] = NULL;
+ iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
return 0;
}
@@ -1701,15 +1880,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
end += table->length;
p += IVRS_HEADER_LENGTH;
+ /* Phase 1: Process all IVHD blocks */
while (p < end) {
h = (struct ivhd_header *)p;
if (*p == amd_iommu_target_ivhd_type) {
- DUMP_printk("device: %02x:%02x.%01x cap: %04x "
- "seg: %d flags: %01x info %04x\n",
- PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
- PCI_FUNC(h->devid), h->cap_ptr,
- h->pci_seg, h->flags, h->info);
+ DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
+ "flags: %01x info %04x\n",
+ h->pci_seg, PCI_BUS_NUM(h->devid),
+ PCI_SLOT(h->devid), PCI_FUNC(h->devid),
+ h->cap_ptr, h->flags, h->info);
DUMP_printk(" mmio-addr: %016llx\n",
h->mmio_phys);
@@ -1717,7 +1897,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
if (iommu == NULL)
return -ENOMEM;
- ret = init_iommu_one(iommu, h);
+ ret = init_iommu_one(iommu, h, table);
if (ret)
return ret;
}
@@ -1726,6 +1906,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
}
WARN_ON(p != end);
+ /* Phase 2 : Early feature support check */
+ get_global_efr();
+
+ /* Phase 3 : Enabling IOMMU features */
+ for_each_iommu(iommu) {
+ ret = init_iommu_one_late(iommu);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1762,7 +1952,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
char *buf)
{
struct amd_iommu *iommu = dev_to_amd_iommu(dev);
- return sprintf(buf, "%llx\n", iommu->features);
+ return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
@@ -1789,16 +1979,18 @@ static const struct attribute_group *amd_iommu_groups[] = {
*/
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
- u64 features;
+ u64 features, features2;
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
return;
/* read extended feature bits */
features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
if (!iommu->features) {
iommu->features = features;
+ iommu->features2 = features2;
return;
}
@@ -1806,9 +1998,13 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
* Sanity check and warn if EFR values from
* IVHD and MMIO conflict.
*/
- if (features != iommu->features)
- pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
- features, iommu->features);
+ if (features != iommu->features ||
+ features2 != iommu->features2) {
+ pr_warn(FW_WARN
+ "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
+ features, iommu->features,
+ features2, iommu->features2);
+ }
}
static int __init iommu_init_pci(struct amd_iommu *iommu)
@@ -1816,7 +2012,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int cap_ptr = iommu->cap_ptr;
int ret;
- iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
+ iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
if (!iommu->dev)
return -ENODEV;
@@ -1863,10 +2060,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
- ret = iommu_init_ga_log(iommu);
- if (ret)
- return ret;
-
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
pr_info("Using strict mode due to virtualization\n");
iommu_set_dma_strict();
@@ -1879,7 +2072,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int i, j;
iommu->root_pdev =
- pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
+ pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ iommu->dev->bus->number,
PCI_DEVFN(0, 0));
/*
@@ -1906,8 +2100,11 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
- iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+ ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d", iommu->index);
+ if (ret)
+ return ret;
+
iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev);
@@ -1928,7 +2125,7 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pr_info("Extended features (%#llx):", iommu->features);
+ pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
@@ -1938,13 +2135,14 @@ static void print_iommu_info(void)
if (iommu->features & FEATURE_GAM_VAPIC)
pr_cont(" GA_vAPIC");
+ if (iommu->features & FEATURE_SNP)
+ pr_cont(" SNP");
+
pr_cont("\n");
}
}
if (irq_remapping_enabled) {
pr_info("Interrupt remapping enabled\n");
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- pr_info("Virtual APIC enabled\n");
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
@@ -1953,6 +2151,7 @@ static void print_iommu_info(void)
static int __init amd_iommu_init_pci(void)
{
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
int ret;
for_each_iommu(iommu) {
@@ -1983,7 +2182,8 @@ static int __init amd_iommu_init_pci(void)
goto out;
}
- init_device_table_dma();
+ for_each_pci_segment(pci_seg)
+ init_device_table_dma(pci_seg);
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
@@ -2232,9 +2432,6 @@ enable_faults:
if (iommu->ppr_log != NULL)
iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
-
- iommu_ga_log_enable(iommu);
-
return 0;
}
@@ -2249,19 +2446,28 @@ enable_faults:
static void __init free_unity_maps(void)
{
struct unity_map_entry *entry, *next;
+ struct amd_iommu_pci_seg *p, *pci_seg;
- list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
- list_del(&entry->list);
- kfree(entry);
+ for_each_pci_segment_safe(pci_seg, p) {
+ list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
}
}
/* called for unity map ACPI definition */
-static int __init init_unity_map_range(struct ivmd_header *m)
+static int __init init_unity_map_range(struct ivmd_header *m,
+ struct acpi_table_header *ivrs_base)
{
struct unity_map_entry *e = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
char *s;
+ pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
@@ -2277,7 +2483,7 @@ static int __init init_unity_map_range(struct ivmd_header *m)
case ACPI_IVMD_TYPE_ALL:
s = "IVMD_TYPE_ALL\t\t";
e->devid_start = 0;
- e->devid_end = amd_iommu_last_bdf;
+ e->devid_end = pci_seg->last_bdf;
break;
case ACPI_IVMD_TYPE_RANGE:
s = "IVMD_TYPE_RANGE\t\t";
@@ -2299,14 +2505,16 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (m->flags & IVMD_FLAG_EXCL_RANGE)
e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
- DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
- " range_start: %016llx range_end: %016llx flags: %x\n", s,
+ DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
+ "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
+ " flags: %x\n", s, m->pci_seg,
PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
- PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
+ PCI_FUNC(e->devid_start), m->pci_seg,
+ PCI_BUS_NUM(e->devid_end),
PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
e->address_start, e->address_end, m->flags);
- list_add_tail(&e->list, &amd_iommu_unity_map);
+ list_add_tail(&e->list, &pci_seg->unity_map);
return 0;
}
@@ -2323,7 +2531,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
- init_unity_map_range(m);
+ init_unity_map_range(m, table);
p += m->length;
}
@@ -2334,35 +2542,48 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
/*
* Init the device table to not allow DMA access for devices
*/
-static void init_device_table_dma(void)
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- set_dev_entry_bit(devid, DEV_ENTRY_VALID);
- set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+ if (dev_table == NULL)
+ return;
+
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ if (!amd_iommu_snp_en)
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
}
}
-static void __init uninit_device_table_dma(void)
+static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+ if (dev_table == NULL)
+ return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- amd_iommu_dev_table[devid].data[0] = 0ULL;
- amd_iommu_dev_table[devid].data[1] = 0ULL;
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ dev_table[devid].data[0] = 0ULL;
+ dev_table[devid].data[1] = 0ULL;
}
}
static void init_device_table(void)
{
+ struct amd_iommu_pci_seg *pci_seg;
u32 devid;
if (!amd_iommu_irq_remap)
return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
- set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+ for_each_pci_segment(pci_seg) {
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
+ __set_dev_entry_bit(pci_seg->dev_table,
+ devid, DEV_ENTRY_IRQ_TBL_EN);
+ }
}
static void iommu_init_flags(struct amd_iommu *iommu)
@@ -2440,8 +2661,6 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
- iommu_feature_enable(iommu, CONTROL_GAM_EN);
- fallthrough;
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
@@ -2478,7 +2697,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
static void early_enable_iommus(void)
{
struct amd_iommu *iommu;
-
+ struct amd_iommu_pci_seg *pci_seg;
if (!copy_device_table()) {
/*
@@ -2488,9 +2707,14 @@ static void early_enable_iommus(void)
*/
if (amd_iommu_pre_enabled)
pr_err("Failed to copy DEV table from previous kernel.\n");
- if (old_dev_tbl_cpy != NULL)
- free_pages((unsigned long)old_dev_tbl_cpy,
- get_order(dev_table_size));
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->old_dev_tbl_cpy != NULL) {
+ free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = NULL;
+ }
+ }
for_each_iommu(iommu) {
clear_translation_pre_enabled(iommu);
@@ -2498,9 +2722,13 @@ static void early_enable_iommus(void)
}
} else {
pr_info("Copied DEV table from previous kernel.\n");
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = old_dev_tbl_cpy;
+
+ for_each_pci_segment(pci_seg) {
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
+ }
+
for_each_iommu(iommu) {
iommu_disable_command_buffer(iommu);
iommu_disable_event_buffer(iommu);
@@ -2512,19 +2740,6 @@ static void early_enable_iommus(void)
iommu_flush_all_caches(iommu);
}
}
-
-#ifdef CONFIG_IRQ_REMAP
- /*
- * Note: We have already checked GASup from IVRS table.
- * Now, we need to make sure that GAMSup is set.
- */
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
-#endif
}
static void enable_iommus_v2(void)
@@ -2537,10 +2752,72 @@ static void enable_iommus_v2(void)
}
}
+static void enable_iommus_vapic(void)
+{
+#ifdef CONFIG_IRQ_REMAP
+ u32 status, i;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ /*
+ * Disable GALog if already running. It could have been enabled
+ * in the previous boot before kdump.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ continue;
+
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+ /*
+ * Need to poll the GALogRun bit until it reads zero before
+ * we can safely set or modify the GA Log registers.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ break;
+ udelay(10);
+ }
+
+ if (WARN_ON(i >= LOOP_TIMEOUT))
+ return;
+ }
+
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ if (amd_iommu_snp_en &&
+ !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
+ pr_warn("Force to disable Virtual APIC due to SNP\n");
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ /* Enabling GAM and SNPAVIC support */
+ for_each_iommu(iommu) {
+ if (iommu_init_ga_log(iommu) ||
+ iommu_ga_log_enable(iommu))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GAM_EN);
+ if (amd_iommu_snp_en)
+ iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
+ }
+
+ amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
+ pr_info("Virtual APIC enabled\n");
+#endif
+}
+
static void enable_iommus(void)
{
early_enable_iommus();
-
+ enable_iommus_vapic();
enable_iommus_v2();
}
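The GALog shutdown in enable_iommus_vapic() above open-codes a bounded poll on MMIO_STATUS_GALOG_RUN_MASK. As a point of comparison only (not what the driver does), the same wait could be expressed with the generic helper from <linux/iopoll.h>; the delay and timeout values below mirror the udelay(10) x LOOP_TIMEOUT loop.

/* Sketch: equivalent wait using readl_poll_timeout(). */
u32 status;
int err = readl_poll_timeout(iommu->mmio_base + MMIO_STATUS_OFFSET,
			     status,
			     !(status & MMIO_STATUS_GALOG_RUN_MASK),
			     10, 10 * LOOP_TIMEOUT);
if (err)
	pr_warn("GALog still running, giving up\n");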
@@ -2590,27 +2867,11 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_iommu_resources(void)
{
- kmemleak_free(irq_lookup_table);
- free_pages((unsigned long)irq_lookup_table,
- get_order(rlookup_table_size));
- irq_lookup_table = NULL;
-
kmem_cache_destroy(amd_iommu_irq_cache);
amd_iommu_irq_cache = NULL;
- free_pages((unsigned long)amd_iommu_rlookup_table,
- get_order(rlookup_table_size));
- amd_iommu_rlookup_table = NULL;
-
- free_pages((unsigned long)amd_iommu_alias_table,
- get_order(alias_table_size));
- amd_iommu_alias_table = NULL;
-
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = NULL;
-
free_iommu_all();
+ free_pci_segments();
}
/* SB IOAPIC is always on this device in AMD systems */
@@ -2709,7 +2970,7 @@ static void __init ivinfo_init(void *ivrs)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int i, remap_cache_sz, ret;
+ int remap_cache_sz, ret;
acpi_status status;
if (!amd_iommu_detected)
@@ -2737,42 +2998,8 @@ static int __init early_amd_iommu_init(void)
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
- /*
- * First parse ACPI tables to find the largest Bus/Dev/Func
- * we need to handle. Upon this information the shared data
- * structures for the IOMMUs in the system will be allocated
- */
- ret = find_last_devid_acpi(ivrs_base);
- if (ret)
- goto out;
-
- dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
- alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
- rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
-
/* Device table - directly used by all IOMMUs */
ret = -ENOMEM;
- amd_iommu_dev_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
- get_order(dev_table_size));
- if (amd_iommu_dev_table == NULL)
- goto out;
-
- /*
- * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
- * IOMMU see for that device
- */
- amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
- get_order(alias_table_size));
- if (amd_iommu_alias_table == NULL)
- goto out;
-
- /* IOMMU rlookup table - find the IOMMU for a specific device */
- amd_iommu_rlookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- if (amd_iommu_rlookup_table == NULL)
- goto out;
amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
@@ -2781,12 +3008,6 @@ static int __init early_amd_iommu_init(void)
goto out;
/*
- * let all alias entries point to itself
- */
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- amd_iommu_alias_table[i] = i;
-
- /*
* never allocate domain 0 because its used as the non-allocated and
* error value placeholder
*/
@@ -2808,6 +3029,7 @@ static int __init early_amd_iommu_init(void)
amd_iommu_irq_remap = check_ioapic_information();
if (amd_iommu_irq_remap) {
+ struct amd_iommu_pci_seg *pci_seg;
/*
* Interrupt remapping enabled, create kmem_cache for the
* remapping tables.
@@ -2824,13 +3046,10 @@ static int __init early_amd_iommu_init(void)
if (!amd_iommu_irq_cache)
goto out;
- irq_lookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- kmemleak_alloc(irq_lookup_table, rlookup_table_size,
- 1, GFP_KERNEL);
- if (!irq_lookup_table)
- goto out;
+ for_each_pci_segment(pci_seg) {
+ if (alloc_irq_lookup_table(pci_seg))
+ goto out;
+ }
}
ret = init_memory_definitions(ivrs_base);
@@ -2937,6 +3156,7 @@ static int __init state_next(void)
register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+ enable_iommus_vapic();
enable_iommus_v2();
break;
case IOMMU_PCI_INIT:
@@ -2967,8 +3187,11 @@ static int __init state_next(void)
free_iommu_resources();
} else {
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg)
+ uninit_device_table_dma(pci_seg);
- uninit_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
}
@@ -3161,15 +3384,17 @@ static int __init parse_amd_iommu_options(char *str)
static int __init parse_ivrs_ioapic(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_ioapic%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+ }
}
if (early_ioapic_map_size == EARLY_MAP_SIZE) {
@@ -3178,7 +3403,7 @@ static int __init parse_ivrs_ioapic(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_ioapic_map_size++;
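The old 16-bit devid packing (bus << 8 | dev << 3 | fn) grows a segment field here. Below is a self-contained sketch of the presumed 32-bit SBDF layout behind IVRS_GET_SBDF_ID() and the PCI_SBDF_TO_* accessors used later in this series; the bit positions are inferred from the old packing and the format strings, so treat them as an assumption rather than the header definition.

/* Presumed 32-bit SBDF layout: [31:16] segment, [15:8] bus,
 * [7:3] device, [2:0] function. Inferred, not copied. */
#include <stdint.h>

static inline uint32_t ivrs_get_sbdf_id(uint32_t seg, uint32_t bus,
					uint32_t dev, uint32_t fn)
{
	return ((seg & 0xffff) << 16) | ((bus & 0xff) << 8) |
	       ((dev & 0x1f) << 3) | (fn & 0x7);
}

static inline uint16_t sbdf_to_segid(uint32_t sbdf) { return sbdf >> 16; }
static inline uint16_t sbdf_to_devid(uint32_t sbdf) { return sbdf & 0xffff; }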
@@ -3191,15 +3416,17 @@ static int __init parse_ivrs_ioapic(char *str)
static int __init parse_ivrs_hpet(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_hpet%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+ }
}
if (early_hpet_map_size == EARLY_MAP_SIZE) {
@@ -3208,7 +3435,7 @@ static int __init parse_ivrs_hpet(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_hpet_map_size++;
@@ -3221,15 +3448,18 @@ static int __init parse_ivrs_hpet(char *str)
static int __init parse_ivrs_acpihid(char *str)
{
- u32 bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p;
char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
int ret, i;
ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
if (ret != 4) {
- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
- return 1;
+ ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
+ return 1;
+ }
}
p = acpiid;
@@ -3244,8 +3474,7 @@ static int __init parse_ivrs_acpihid(char *str)
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
- early_acpihid_map[i].devid =
- ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
early_acpihid_map[i].cmd_line = true;
return 1;
@@ -3260,7 +3489,12 @@ __setup("ivrs_acpihid", parse_ivrs_acpihid);
bool amd_iommu_v2_supported(void)
{
- return amd_iommu_v2_present;
+ /*
+ * Since DTE[Mode]=0 is prohibited on an SNP-enabled system
+ * (i.e. EFR[SNPSup]=1), an IOMMUv2 page table cannot be used
+ * without first setting up an IOMMUv1 page table.
+ */
+ return amd_iommu_v2_present && !amd_iommu_snp_en;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
@@ -3363,3 +3597,41 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void)
+{
+ /*
+ * SNP support requires that the IOMMU is enabled and is not
+ * configured in passthrough mode.
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
+ return -EINVAL;
+ }
+
+ /*
+ * Prevent enabling SNP after the IOMMU_ENABLED state because this
+ * affects how the IOMMU driver sets up data structures and
+ * configures the IOMMU hardware.
+ */
+ if (init_state > IOMMU_ENABLED) {
+ pr_err("SNP: Too late to enable SNP for IOMMU.\n");
+ return -EINVAL;
+ }
+
+ amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+ if (!amd_iommu_snp_en)
+ return -EINVAL;
+
+ pr_info("SNP enabled\n");
+
+ /* Enforce IOMMU v1 pagetable when SNP is enabled. */
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+
+ return 0;
+}
+#endif
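A hypothetical caller sketch to illustrate the ordering contract: host SEV-SNP setup has to call amd_iommu_snp_enable() before the IOMMU init state machine passes IOMMU_ENABLED, or the -EINVAL path above fires. The function name host_snp_setup is invented for illustration only.

/* Hypothetical caller -- illustrates the ordering contract only. */
static int __init host_snp_setup(void)
{
	int ret = amd_iommu_snp_enable();

	if (ret)	/* no IOMMU, passthrough mode, or called too late */
		return ret;

	/* ... safe to continue with RMP table initialization ... */
	return 0;
}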
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 6608d1717574..7d4b61e5db47 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -258,7 +258,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
/* pte could have been changed somewhere. */
- if (cmpxchg64(pte, __pte, __npte) != __pte)
+ if (!try_cmpxchg64(pte, &__pte, __npte))
free_page((unsigned long)page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -341,10 +341,8 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
u64 *pt;
int mode;
- while (cmpxchg64(pte, pteval, 0) != pteval) {
+ while (!try_cmpxchg64(pte, &pteval, 0))
pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
- pteval = *pte;
- }
if (!IOMMU_PTE_PRESENT(pteval))
return;
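Why the explicit "pteval = *pte;" reload could be dropped in the hunk above: try_cmpxchg64() returns true on success and, on failure, writes the value it actually found back into its "old" argument, so a retry loop needs no manual reload. A semantics sketch, not the kernel's implementation:

/* Semantics sketch of try_cmpxchg64(). */
static inline bool sketch_try_cmpxchg64(u64 *ptr, u64 *old, u64 new)
{
	u64 seen = cmpxchg64(ptr, *old, new);

	if (seen == *old)
		return true;
	*old = seen;
	return false;
}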
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 840831d5d2ad..65b8e4fd8217 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -62,9 +62,6 @@
static DEFINE_SPINLOCK(pd_bitmap_lock);
-/* List of all available dev_data structures */
-static LLIST_HEAD(dev_data_list);
-
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
@@ -95,13 +92,6 @@ static void detach_device(struct device *dev);
*
****************************************************************************/
-static inline u16 get_pci_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return pci_dev_id(pdev);
-}
-
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -122,16 +112,74 @@ static inline int get_acpihid_device_id(struct device *dev,
return -EINVAL;
}
-static inline int get_device_id(struct device *dev)
+static inline int get_device_sbdf_id(struct device *dev)
{
- int devid;
+ int sbdf;
if (dev_is_pci(dev))
- devid = get_pci_device_id(dev);
+ sbdf = get_pci_sbdf_id(to_pci_dev(dev));
else
- devid = get_acpihid_device_id(dev, NULL);
+ sbdf = get_acpihid_device_id(dev, NULL);
+
+ return sbdf;
+}
+
+struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
+{
+ struct dev_table_entry *dev_table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ BUG_ON(pci_seg == NULL);
+ dev_table = pci_seg->dev_table;
+ BUG_ON(dev_table == NULL);
+
+ return dev_table;
+}
+
+static inline u16 get_device_segment(struct device *dev)
+{
+ u16 seg;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ seg = pci_domain_nr(pdev->bus);
+ } else {
+ u32 devid = get_acpihid_device_id(dev, NULL);
+
+ seg = PCI_SBDF_TO_SEGID(devid);
+ }
+
+ return seg;
+}
+
+/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->rlookup_table[devid] = iommu;
+}
+
+static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == seg)
+ return pci_seg->rlookup_table[devid];
+ }
+ return NULL;
+}
- return devid;
+static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
+{
+ u16 seg = get_device_segment(dev);
+ int devid = get_device_sbdf_id(dev);
+
+ if (devid < 0)
+ return NULL;
+ return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}
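The reverse lookup above scans every segment, so its cost grows linearly with the number of PCI segments. The iterator is presumably a plain list walk over a global segment list; the list head name below is an assumption inferred from the pattern, not quoted from the header.

/* Presumed iterator definition (list head name assumed). */
#define for_each_pci_segment(pci_seg) \
	list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)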
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
@@ -139,9 +187,10 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct iommu_dev_data *alloc_dev_data(u16 devid)
+static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
@@ -151,19 +200,20 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
- llist_add(&dev_data->dev_data_list, &dev_data_list);
+ llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
return dev_data;
}
-static struct iommu_dev_data *search_dev_data(u16 devid)
+static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (llist_empty(&dev_data_list))
+ if (llist_empty(&pci_seg->dev_data_list))
return NULL;
- node = dev_data_list.first;
+ node = pci_seg->dev_data_list.first;
llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
return dev_data;
@@ -174,67 +224,74 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct amd_iommu *iommu;
+ struct dev_table_entry *dev_table;
u16 devid = pci_dev_id(pdev);
if (devid == alias)
return 0;
- amd_iommu_rlookup_table[alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[alias].data));
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return 0;
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+ dev_table = get_dev_table(iommu);
+ memcpy(dev_table[alias].data,
+ dev_table[devid].data,
+ sizeof(dev_table[alias].data));
return 0;
}
-static void clone_aliases(struct pci_dev *pdev)
+static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
- if (!pdev)
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
return;
+ pdev = to_pci_dev(dev);
/*
* The IVRS alias stored in the alias table may not be
* part of the PCI DMA aliases if its bus differs
* from the original device.
*/
- clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
+ clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
pci_for_each_dma_alias(pdev, clone_alias, NULL);
}
-static struct pci_dev *setup_aliases(struct device *dev)
+static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u16 ivrs_alias;
/* For ACPI HID devices, there are no aliases */
if (!dev_is_pci(dev))
- return NULL;
+ return;
/*
* Add the IVRS alias to the pci aliases if it is on the same
* bus. The IVRS table may know about a quirk that we don't.
*/
- ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
- clone_aliases(pdev);
-
- return pdev;
+ clone_aliases(iommu, dev);
}
-static struct iommu_dev_data *find_dev_data(u16 devid)
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- dev_data = search_dev_data(devid);
+ dev_data = search_dev_data(iommu, devid);
if (dev_data == NULL) {
- dev_data = alloc_dev_data(devid);
+ dev_data = alloc_dev_data(iommu, devid);
if (!dev_data)
return NULL;
@@ -296,42 +353,49 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
*/
static bool check_device(struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu;
+ int devid, sbdf;
if (!dev)
return false;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return false;
+ devid = PCI_SBDF_TO_DEVID(sbdf);
- /* Out of our scope? */
- if (devid > amd_iommu_last_bdf)
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return false;
- if (amd_iommu_rlookup_table[devid] == NULL)
+ /* Out of our scope? */
+ pci_seg = iommu->pci_seg;
+ if (devid > pci_seg->last_bdf)
return false;
return true;
}
-static int iommu_init_device(struct device *dev)
+static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
struct iommu_dev_data *dev_data;
- int devid;
+ int devid, sbdf;
if (dev_iommu_priv_get(dev))
return 0;
- devid = get_device_id(dev);
- if (devid < 0)
- return devid;
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return sbdf;
- dev_data = find_dev_data(devid);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ dev_data = find_dev_data(iommu, devid);
if (!dev_data)
return -ENOMEM;
- dev_data->pdev = setup_aliases(dev);
+ dev_data->dev = dev;
+ setup_aliases(iommu, dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
@@ -341,9 +405,6 @@ static int iommu_init_device(struct device *dev)
*/
if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
- struct amd_iommu *iommu;
-
- iommu = amd_iommu_rlookup_table[dev_data->devid];
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
@@ -352,18 +413,21 @@ static int iommu_init_device(struct device *dev)
return 0;
}
-static void iommu_ignore_device(struct device *dev)
+static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return;
- amd_iommu_rlookup_table[devid] = NULL;
- memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ pci_seg->rlookup_table[devid] = NULL;
+ memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
- setup_aliases(dev);
+ setup_aliases(iommu, dev);
}
static void amd_iommu_uninit_device(struct device *dev)
@@ -391,13 +455,13 @@ static void amd_iommu_uninit_device(struct device *dev)
*
****************************************************************************/
-static void dump_dte_entry(u16 devid)
+static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i,
- amd_iommu_dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -409,7 +473,7 @@ static void dump_command(unsigned long phys_addr)
pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
-static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, vmg_tag, flags;
@@ -421,7 +485,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -432,8 +496,8 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
vmg_tag, spa, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, spa, flags);
}
@@ -441,7 +505,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
pci_dev_put(pdev);
}
-static void amd_iommu_report_rmp_fault(volatile u32 *event)
+static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, flags_rmp, vmg_tag, flags;
@@ -454,7 +518,7 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
gpa = ((u64)event[3] << 32) | event[2];
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -465,8 +529,8 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
vmg_tag, gpa, flags_rmp, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, gpa, flags_rmp, flags);
}
@@ -480,13 +544,14 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
#define IS_WRITE_REQUEST(flags) \
((flags) & EVENT_FLAG_RW)
-static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
+static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
+ u16 devid, u16 domain_id,
u64 address, int flags)
{
struct iommu_dev_data *dev_data = NULL;
struct pci_dev *pdev;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -511,8 +576,8 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
domain_id, address, flags);
}
} else {
- pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domain_id, address, flags);
}
@@ -549,26 +614,26 @@ retry:
}
if (type == EVENT_TYPE_IO_FAULT) {
- amd_iommu_report_page_fault(devid, pasid, address, flags);
+ amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
return;
}
switch (type) {
case EVENT_TYPE_ILL_DEV:
- dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
- dump_dte_entry(devid);
+ dump_dte_entry(iommu, devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
- dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
"address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
@@ -580,26 +645,26 @@ retry:
address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_RMP_FAULT:
- amd_iommu_report_rmp_fault(event);
+ amd_iommu_report_rmp_fault(iommu, event);
break;
case EVENT_TYPE_RMP_HW_ERR:
- amd_iommu_report_rmp_hw_error(event);
+ amd_iommu_report_rmp_hw_error(iommu, event);
break;
case EVENT_TYPE_INV_PPR_REQ:
pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
- dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags, tag);
break;
default:
@@ -636,7 +701,7 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
fault.address = raw[1];
fault.pasid = PPR_PASID(raw[0]);
- fault.device_id = PPR_DEVID(raw[0]);
+ fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
@@ -1125,8 +1190,9 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= 0xffff; ++devid)
+ for (devid = 0; devid <= last_bdf; ++devid)
iommu_flush_dte(iommu, devid);
iommu_completion_wait(iommu);
@@ -1139,8 +1205,9 @@ static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
u32 dom_id;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+ for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
dom_id, 1);
@@ -1183,8 +1250,9 @@ static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
+ for (devid = 0; devid <= last_bdf; devid++)
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -1212,7 +1280,9 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
int qdep;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
@@ -1232,20 +1302,28 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
struct amd_iommu *iommu;
+ struct pci_dev *pdev = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
- if (dev_data->pdev)
- ret = pci_for_each_dma_alias(dev_data->pdev,
+ if (dev_is_pci(dev_data->dev))
+ pdev = to_pci_dev(dev_data->dev);
+
+ if (pdev)
+ ret = pci_for_each_dma_alias(pdev,
device_flush_dte_alias, iommu);
else
ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
- alias = amd_iommu_alias_table[dev_data->devid];
+ pci_seg = iommu->pci_seg;
+ alias = pci_seg->alias_table[dev_data->devid];
if (alias != dev_data->devid) {
ret = iommu_flush_dte(iommu, alias);
if (ret)
@@ -1461,28 +1539,35 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void set_dte_entry(u16 devid, struct protection_domain *domain,
- bool ats, bool ppr)
+static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+ struct protection_domain *domain, bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
- flags = amd_iommu_dev_table[devid].data[1];
+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+
+ /*
+ * When SNP is enabled, only set the TV bit when IOMMU
+ * page translation is in use.
+ */
+ if (!amd_iommu_snp_en || (domain->id != 0))
+ pte_root |= DTE_FLAG_TV;
+
+ flags = dev_table[devid].data[1];
if (ats)
flags |= DTE_FLAG_IOTLB;
if (ppr) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (iommu_feature(iommu, FEATURE_EPHSUP))
pte_root |= 1ULL << DEV_ENTRY_PPR;
}
@@ -1516,9 +1601,9 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
- old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
- amd_iommu_dev_table[devid].data[1] = flags;
- amd_iommu_dev_table[devid].data[0] = pte_root;
+ old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
+ dev_table[devid].data[1] = flags;
+ dev_table[devid].data[0] = pte_root;
/*
* A kdump kernel might be replacing a domain ID that was copied from
@@ -1526,19 +1611,23 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
* entries for the old domain ID that is being overwritten
*/
if (old_domid) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
amd_iommu_flush_tlb_domid(iommu, old_domid);
}
}
-static void clear_dte_entry(u16 devid)
+static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
/* remove entry from the device table seen by the hardware */
- amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
- amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
+ dev_table[devid].data[0] = DTE_FLAG_V;
+
+ if (!amd_iommu_snp_en)
+ dev_table[devid].data[0] |= DTE_FLAG_TV;
+
+ dev_table[devid].data[1] &= DTE_FLAG_MASK;
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
}
static void do_attach(struct iommu_dev_data *dev_data,
@@ -1547,7 +1636,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct amd_iommu *iommu;
bool ats;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1559,9 +1650,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
}
@@ -1571,13 +1662,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
- clear_dte_entry(dev_data->devid);
- clone_aliases(dev_data->pdev);
+ clear_dte_entry(iommu, dev_data->devid);
+ clone_aliases(iommu, dev_data->dev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -1749,23 +1842,24 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
- int ret, devid;
+ int ret;
if (!check_device(dev))
return ERR_PTR(-ENODEV);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
- ret = iommu_init_device(dev);
+ ret = iommu_init_device(iommu, dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
- iommu_ignore_device(dev);
+ iommu_ignore_device(iommu, dev);
} else {
amd_iommu_set_pci_msi_domain(dev, iommu);
iommu_dev = &iommu->iommu;
@@ -1785,13 +1879,14 @@ static void amd_iommu_probe_finalize(struct device *dev)
static void amd_iommu_release_device(struct device *dev)
{
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
return;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
@@ -1816,9 +1911,13 @@ static void update_device_table(struct protection_domain *domain)
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain,
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ if (!iommu)
+ continue;
+ set_dte_entry(iommu, dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
}
}
@@ -1969,6 +2068,13 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
+ /*
+ * Since DTE[Mode]=0 is prohibited on an SNP-enabled system,
+ * default to IOMMU_DOMAIN_DMA[_FQ].
+ */
+ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
+ return NULL;
+
domain = protection_domain_alloc(type);
if (!domain)
return NULL;
@@ -2004,7 +2110,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
@@ -2013,7 +2118,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (dev_data->domain != NULL)
detach_device(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return;
@@ -2040,7 +2145,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return -EINVAL;
@@ -2169,13 +2274,21 @@ static void amd_iommu_get_resv_regions(struct device *dev,
{
struct iommu_resv_region *region;
struct unity_map_entry *entry;
- int devid;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return;
+
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return;
+ pci_seg = iommu->pci_seg;
- list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ list_for_each_entry(entry, &pci_seg->unity_map, list) {
int type, prot = 0;
size_t length;
@@ -2280,7 +2393,6 @@ const struct iommu_ops amd_iommu_ops = {
.probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
@@ -2419,8 +2531,9 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
continue;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
-
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ continue;
build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
qdep, address, size);
@@ -2582,7 +2695,9 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
struct iommu_cmd cmd;
dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return -ENODEV;
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
@@ -2644,30 +2759,35 @@ EXPORT_SYMBOL(amd_iommu_device_info);
static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);
-static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
+static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
{
u64 dte;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
- dte = amd_iommu_dev_table[devid].data[2];
+ dte = dev_table[devid].data[2];
dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
dte |= iommu_virt_to_phys(table->table);
dte |= DTE_IRQ_REMAP_INTCTL;
dte |= DTE_INTTABLEN;
dte |= DTE_IRQ_REMAP_ENABLE;
- amd_iommu_dev_table[devid].data[2] = dte;
+ dev_table[devid].data[2] = dte;
}
-static struct irq_remap_table *get_irq_table(u16 devid)
+static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
{
struct irq_remap_table *table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
- "%s: no iommu for devid %x\n", __func__, devid))
+ if (WARN_ONCE(!pci_seg->rlookup_table[devid],
+ "%s: no iommu for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
- table = irq_lookup_table[devid];
- if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+ table = pci_seg->irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
return table;
@@ -2700,8 +2820,10 @@ static struct irq_remap_table *__alloc_irq_table(void)
static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->irq_lookup_table[devid] = table;
+ set_dte_irq_entry(iommu, devid, table);
iommu_flush_dte(iommu, devid);
}
@@ -2709,35 +2831,38 @@ static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
void *data)
{
struct irq_remap_table *table = data;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
- irq_lookup_table[alias] = table;
- set_dte_irq_entry(alias, table);
+ if (!iommu)
+ return -EINVAL;
- iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+ pci_seg = iommu->pci_seg;
+ pci_seg->irq_lookup_table[alias] = table;
+ set_dte_irq_entry(iommu, alias, table);
+ iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
return 0;
}
-static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
+static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
+ u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
- struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
u16 alias;
spin_lock_irqsave(&iommu_table_lock, flags);
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- goto out_unlock;
-
- table = irq_lookup_table[devid];
+ pci_seg = iommu->pci_seg;
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- alias = amd_iommu_alias_table[devid];
- table = irq_lookup_table[alias];
+ alias = pci_seg->alias_table[devid];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2751,11 +2876,11 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
spin_lock_irqsave(&iommu_table_lock, flags);
- table = irq_lookup_table[devid];
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- table = irq_lookup_table[alias];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2786,18 +2911,14 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align,
- struct pci_dev *pdev)
+static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
+ bool align, struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
unsigned long flags;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- return -ENODEV;
-
- table = alloc_irq_table(devid, pdev);
+ table = alloc_irq_table(iommu, devid, pdev);
if (!table)
return -ENODEV;
@@ -2836,20 +2957,15 @@ out:
return index;
}
-static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
- struct amd_ir_data *data)
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte, struct amd_ir_data *data)
{
bool ret;
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
struct irte_ga *entry;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2880,17 +2996,13 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
return 0;
}
-static int modify_irte(u16 devid, int index, union irte *irte)
+static int modify_irte(struct amd_iommu *iommu,
+ u16 devid, int index, union irte *irte)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2904,17 +3016,12 @@ static int modify_irte(u16 devid, int index, union irte *irte)
return 0;
}
-static void free_irte(u16 devid, int index)
+static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return;
@@ -2956,49 +3063,49 @@ static void irte_ga_prepare(void *entry,
irte->lo.fields_remap.valid = 1;
}
-static void irte_activate(void *entry, u16 devid, u16 index)
+static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 1;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_activate(void *entry, u16 devid, u16 index)
+static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 1;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_deactivate(void *entry, u16 devid, u16 index)
+static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 0;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 0;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
union irte *irte = (union irte *) entry;
irte->fields.vector = vector;
irte->fields.destination = dest_apicid;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
@@ -3009,7 +3116,7 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
APICID_TO_IRTE_DEST_LO(dest_apicid);
irte->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(dest_apicid);
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
}
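Every irte_ops callback converted above now takes the owning IOMMU so it can reach the per-segment tables. For reference, the shape of the affected ops as inferred from the call sites in these hunks; only the three ops visible here are listed, and the struct name is marked as an excerpt because the full table has more members.

/* Excerpt of the callback table shape after this patch (inferred). */
struct amd_irte_ops_excerpt {
	void (*activate)(struct amd_iommu *iommu, void *entry,
			 u16 devid, u16 index);
	void (*deactivate)(struct amd_iommu *iommu, void *entry,
			   u16 devid, u16 index);
	void (*set_affinity)(struct amd_iommu *iommu, void *entry,
			     u16 devid, u16 index, u8 vector,
			     u32 dest_apicid);
};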
@@ -3068,7 +3175,7 @@ static int get_devid(struct irq_alloc_info *info)
return get_hpet_devid(info->devid);
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
- return get_device_id(msi_desc_to_dev(info->desc));
+ return get_device_sbdf_id(msi_desc_to_dev(info->desc));
default:
WARN_ON_ONCE(1);
return -1;
@@ -3097,7 +3204,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
int devid, int index, int sub_handle)
{
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ struct amd_iommu *iommu = data->iommu;
if (!iommu)
return;
@@ -3148,8 +3255,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
struct amd_ir_data *data = NULL;
+ struct amd_iommu *iommu;
struct irq_cfg *cfg;
- int i, ret, devid;
+ int i, ret, devid, seg, sbdf;
int index;
if (!info)
@@ -3165,8 +3273,14 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
- devid = get_devid(info);
- if (devid < 0)
+ sbdf = get_devid(info);
+ if (sbdf < 0)
+ return -EINVAL;
+
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = __rlookup_amd_iommu(seg, devid);
+ if (!iommu)
return -EINVAL;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
@@ -3175,9 +3289,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
struct irq_remap_table *table;
- struct amd_iommu *iommu;
- table = alloc_irq_table(devid, NULL);
+ table = alloc_irq_table(iommu, devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -3185,7 +3298,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
* interrupts.
*/
table->min_index = 32;
- iommu = amd_iommu_rlookup_table[devid];
for (i = 0; i < 32; ++i)
iommu->irte_ops->set_allocated(table, i);
}
@@ -3198,10 +3310,10 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
- index = alloc_irq_index(devid, nr_irqs, align,
+ index = alloc_irq_index(iommu, devid, nr_irqs, align,
msi_desc_to_pci_dev(info->desc));
} else {
- index = alloc_irq_index(devid, nr_irqs, false, NULL);
+ index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
}
if (index < 0) {
@@ -3233,6 +3345,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
goto out_free_data;
}
+ data->iommu = iommu;
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
@@ -3249,7 +3362,7 @@ out_free_data:
kfree(irq_data->chip_data);
}
for (i = 0; i < nr_irqs; i++)
- free_irte(devid, index + i);
+ free_irte(iommu, devid, index + i);
out_free_parent:
irq_domain_free_irqs_common(domain, virq, nr_irqs);
return ret;
@@ -3268,7 +3381,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
if (irq_data && irq_data->chip_data) {
data = irq_data->chip_data;
irte_info = &data->irq_2_irte;
- free_irte(irte_info->devid, irte_info->index);
+ free_irte(data->iommu, irte_info->devid, irte_info->index);
kfree(data->entry);
kfree(data);
}
@@ -3286,13 +3399,13 @@ static int irq_remapping_activate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
struct irq_cfg *cfg = irqd_cfg(irq_data);
if (!iommu)
return 0;
- iommu->irte_ops->activate(data->entry, irte_info->devid,
+ iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
irte_info->index);
amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
return 0;
@@ -3303,10 +3416,10 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
if (iommu)
- iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+ iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
irte_info->index);
}
@@ -3326,8 +3439,8 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
if (devid < 0)
return 0;
+ iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
- iommu = amd_iommu_rlookup_table[devid];
return iommu && iommu->ir_domain == d;
}
@@ -3361,7 +3474,7 @@ int amd_iommu_activate_guest_mode(void *data)
entry->hi.fields.vector = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
@@ -3391,7 +3504,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
@@ -3399,12 +3512,16 @@ EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
int ret;
- struct amd_iommu *iommu;
struct amd_iommu_pi_data *pi_data = vcpu_info;
struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
struct amd_ir_data *ir_data = data->chip_data;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
- struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+ struct iommu_dev_data *dev_data;
+
+ if (ir_data->iommu == NULL)
+ return -EINVAL;
+
+ dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
/* Note:
* This device has never been set up for guest mode.
@@ -3426,10 +3543,6 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
pi_data->is_guest_mode = false;
}
- iommu = amd_iommu_rlookup_table[irte_info->devid];
- if (iommu == NULL)
- return -EINVAL;
-
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
if (pi_data->is_guest_mode) {
ir_data->ga_root_ptr = (pi_data->base >> 12);
@@ -3463,7 +3576,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
* Atomically updates the IRTE with the new destination, vector
* and flushes the interrupt entry cache.
*/
- iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+ iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
irte_info->index, cfg->vector,
cfg->dest_apicid);
}
@@ -3475,7 +3588,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = ir_data->iommu;
int ret;
if (!iommu)
@@ -3545,11 +3658,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
!ref || !entry || !entry->lo.fields_vapic.guest_mode)
return 0;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = ir_data->iommu;
if (!iommu)
return -ENODEV;
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENODEV;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index afb3efd565b7..696d5555be57 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -51,7 +51,7 @@ struct pasid_state {
struct device_state {
struct list_head list;
- u16 devid;
+ u32 sbdf;
atomic_t count;
struct pci_dev *pdev;
struct pasid_state **states;
@@ -83,35 +83,25 @@ static struct workqueue_struct *iommu_wq;
static void free_pasid_states(struct device_state *dev_state);
-static u16 device_id(struct pci_dev *pdev)
-{
- u16 devid;
-
- devid = pdev->bus->number;
- devid = (devid << 8) | pdev->devfn;
-
- return devid;
-}
-
-static struct device_state *__get_device_state(u16 devid)
+static struct device_state *__get_device_state(u32 sbdf)
{
struct device_state *dev_state;
list_for_each_entry(dev_state, &state_list, list) {
- if (dev_state->devid == devid)
+ if (dev_state->sbdf == sbdf)
return dev_state;
}
return NULL;
}
-static struct device_state *get_device_state(u16 devid)
+static struct device_state *get_device_state(u32 sbdf)
{
struct device_state *dev_state;
unsigned long flags;
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state != NULL)
atomic_inc(&dev_state->count);
spin_unlock_irqrestore(&state_lock, flags);
@@ -528,15 +518,16 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
unsigned long flags;
struct fault *fault;
bool finish;
- u16 tag, devid;
+ u16 tag, devid, seg_id;
int ret;
iommu_fault = data;
tag = iommu_fault->tag & 0x1ff;
finish = (iommu_fault->tag >> 9) & 1;
- devid = iommu_fault->device_id;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
+ devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
+ pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
devid & 0xff);
if (!pdev)
return -ENODEV;
@@ -550,7 +541,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
goto out;
}
- dev_state = get_device_state(iommu_fault->device_id);
+ dev_state = get_device_state(iommu_fault->sbdf);
if (dev_state == NULL)
goto out;
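The hunks above widen the per-device identifier from a 16-bit devid to a 32-bit sbdf so the PCI segment can ride along in the upper half; PCI_SBDF_TO_SEGID() and PCI_SBDF_TO_DEVID() split it back apart, and the `devid >> 16` / `devid & 0xffff` pair in irq_remapping_select() earlier suggests the same layout. A minimal userspace sketch of that assumed encoding (the pack_sbdf() helper is hypothetical, for illustration only):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the seg/bus/devfn packing implied above. */
static inline uint32_t pack_sbdf(uint16_t seg, uint8_t bus, uint8_t devfn)
{
	return ((uint32_t)seg << 16) | ((uint32_t)bus << 8) | devfn;
}

static inline uint16_t sbdf_to_segid(uint32_t sbdf) { return sbdf >> 16; }
static inline uint16_t sbdf_to_devid(uint32_t sbdf) { return sbdf & 0xffff; }

int main(void)
{
	uint32_t sbdf = pack_sbdf(0x0001, 0x3a, 0x10);	/* 0001:3a:02.0 */

	printf("seg %04x devid %04x bus %02x devfn %02x\n",
	       sbdf_to_segid(sbdf), sbdf_to_devid(sbdf),
	       sbdf_to_devid(sbdf) >> 8, sbdf_to_devid(sbdf) & 0xff);
	return 0;
}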
@@ -609,7 +600,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct pasid_state *pasid_state;
struct device_state *dev_state;
struct mm_struct *mm;
- u16 devid;
+ u32 sbdf;
int ret;
might_sleep();
@@ -617,8 +608,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return -EINVAL;
@@ -692,15 +683,15 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
- u16 devid;
+ u32 sbdf;
might_sleep();
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return;
@@ -742,7 +733,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
struct iommu_group *group;
unsigned long flags;
int ret, tmp;
- u16 devid;
+ u32 sbdf;
might_sleep();
@@ -759,7 +750,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (pasids <= 0 || pasids > (PASID_MASK + 1))
return -EINVAL;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
if (dev_state == NULL)
@@ -768,7 +759,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_init(&dev_state->lock);
init_waitqueue_head(&dev_state->wq);
dev_state->pdev = pdev;
- dev_state->devid = devid;
+ dev_state->sbdf = sbdf;
tmp = pasids;
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -806,7 +797,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_irqsave(&state_lock, flags);
- if (__get_device_state(devid) != NULL) {
+ if (__get_device_state(sbdf) != NULL) {
spin_unlock_irqrestore(&state_lock, flags);
ret = -EBUSY;
goto out_free_domain;
@@ -838,16 +829,16 @@ void amd_iommu_free_device(struct pci_dev *pdev)
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL) {
spin_unlock_irqrestore(&state_lock, flags);
return;
@@ -867,18 +858,18 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
@@ -898,18 +889,18 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
diff --git a/drivers/iommu/amd/quirks.c b/drivers/iommu/amd/quirks.c
index 5120ce4fdce3..79dbb8f33b47 100644
--- a/drivers/iommu/amd/quirks.c
+++ b/drivers/iommu/amd/quirks.c
@@ -15,7 +15,7 @@
struct ivrs_quirk_entry {
u8 id;
- u16 devid;
+ u32 devid;
};
enum {
@@ -49,7 +49,7 @@ static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
const struct ivrs_quirk_entry *i;
for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
- add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u32 *)&i->devid, 0);
return 0;
}
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 8af0242a90d9..1b1725759262 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -564,9 +564,6 @@ static void apple_dart_release_device(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- if (!cfg)
- return;
-
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -771,7 +768,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.of_xlate = apple_dart_of_xlate,
.def_domain_type = apple_dart_def_domain_type,
.get_resv_regions = apple_dart_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 88817a3376ef..d32b02336411 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1380,12 +1380,21 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
{
unsigned int i;
+ u64 val = STRTAB_STE_0_V;
+
+ if (disable_bypass && !force)
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+ else
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- arm_smmu_write_strtab_ent(NULL, -1, strtab);
+ strtab[0] = cpu_to_le64(val);
+ strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+ strtab[2] = 0;
strtab += STRTAB_STE_DWORDS;
}
}
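The rewritten arm_smmu_init_bypass_stes() composes STE dword 0 directly with FIELD_PREP(), which shifts a field value up to the position given by its mask. A small standalone model of that macro, with made-up masks rather than the real STE layout (the genuine FIELD_PREP lives in linux/bitfield.h):

#include <stdint.h>
#include <stdio.h>

/* Simplified FIELD_PREP(): shift val to the mask's lowest set bit. */
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Illustrative masks only -- not the real STE encoding. */
#define STE_V		(1ULL << 0)
#define STE_CFG		(0x7ULL << 1)
#define STE_CFG_ABORT	0ULL
#define STE_CFG_BYPASS	4ULL

int main(void)
{
	int disable_bypass = 1, force = 0;
	uint64_t val = STE_V;

	if (disable_bypass && !force)
		val |= FIELD_PREP(STE_CFG, STE_CFG_ABORT);
	else
		val |= FIELD_PREP(STE_CFG, STE_CFG_BYPASS);

	printf("ste[0] = %#llx\n", (unsigned long long)val);
	return 0;
}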
@@ -1413,7 +1422,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+ arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2537,6 +2546,19 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
+{
+ /* Check the SIDs are in range of the SMMU and our stream table */
+ if (!arm_smmu_sid_in_range(smmu, sid))
+ return -ERANGE;
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ return arm_smmu_init_l2_strtab(smmu, sid);
+
+ return 0;
+}
+
static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
struct arm_smmu_master *master)
{
@@ -2560,20 +2582,9 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
new_stream->id = sid;
new_stream->master = master;
- /*
- * Check the SIDs are in range of the SMMU and our stream table
- */
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
+ ret = arm_smmu_init_sid_strtab(smmu, sid);
+ if (ret)
break;
- }
-
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- break;
- }
/* Insert into SID tree */
new_node = &(smmu->streams.rb_node);
@@ -2691,20 +2702,14 @@ err_free_master:
static void arm_smmu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master *master;
-
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- master = dev_iommu_priv_get(dev);
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
kfree(master);
- iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
@@ -2760,58 +2765,27 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static bool arm_smmu_dev_has_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return arm_smmu_master_iopf_supported(master);
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_supported(master);
- default:
- return false;
- }
-}
-
-static bool arm_smmu_dev_feature_enabled(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return master->iopf_enabled;
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_enabled(master);
- default:
- return false;
- }
-}
-
static int arm_smmu_dev_enable_feature(struct device *dev,
enum iommu_dev_features feat)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_has_feature(dev, feat))
+ if (!master)
return -ENODEV;
- if (arm_smmu_dev_feature_enabled(dev, feat))
- return -EBUSY;
-
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!arm_smmu_master_iopf_supported(master))
+ return -EINVAL;
+ if (master->iopf_enabled)
+ return -EBUSY;
master->iopf_enabled = true;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_supported(master))
+ return -EINVAL;
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
return arm_smmu_master_enable_sva(master);
default:
return -EINVAL;
@@ -2823,16 +2797,20 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_feature_enabled(dev, feat))
+ if (!master)
return -EINVAL;
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!master->iopf_enabled)
+ return -EINVAL;
if (master->sva_enabled)
return -EBUSY;
master->iopf_enabled = false;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_enabled(master))
+ return -EINVAL;
return arm_smmu_master_disable_sva(master);
default:
return -EINVAL;
@@ -2847,9 +2825,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
- .dev_has_feat = arm_smmu_dev_has_feature,
- .dev_feat_enabled = arm_smmu_dev_feature_enabled,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.sva_bind = arm_smmu_sva_bind,
@@ -3049,7 +3024,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
+ arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
return 0;
}
@@ -3743,6 +3718,36 @@ static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
return devm_ioremap_resource(dev, &res);
}
+static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ __le64 *step;
+ struct iommu_iort_rmr_data *rmr;
+ int ret, i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
+ if (ret) {
+ dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
+ rmr->sids[i]);
+ continue;
+ }
+
+ step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
+ arm_smmu_init_bypass_stes(step, 1, true);
+ }
+ }
+
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
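arm_smmu_rmr_install_bypass_ste() walks a list of iommu_resv_region nodes but needs the surrounding iommu_iort_rmr_data, which it recovers with container_of() on the embedded rr member. A runnable sketch of that pattern with toy stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for iommu_resv_region / iommu_iort_rmr_data. */
struct resv_region { unsigned long start, length; };
struct rmr_data {
	int num_sids;
	struct resv_region rr;	/* embedded, like rmr->rr above */
};

int main(void)
{
	struct rmr_data rmr = { .num_sids = 2, .rr = { 0x8000000, 0x10000 } };
	struct resv_region *e = &rmr.rr;	/* what a list walk hands out */
	struct rmr_data *back = container_of(e, struct rmr_data, rr);

	printf("num_sids = %d\n", back->num_sids);	/* prints 2 */
	return 0;
}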
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -3826,6 +3831,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Record our private device structure */
platform_set_drvdata(pdev, smmu);
+ /* Check for RMRs and install bypass STEs if any */
+ arm_smmu_rmr_install_bypass_ste(smmu);
+
/* Reset the device */
ret = arm_smmu_device_reset(smmu, bypass);
if (ret)
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
index b0cc01aa20c9..2a5a95e8e3f9 100644
--- a/drivers/iommu/arm/arm-smmu/Makefile
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM_DEBUG) += arm-smmu-qcom-debug.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
new file mode 100644
index 000000000000..6eed8e67a0ca
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/ratelimit.h>
+
+#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
+
+enum qcom_smmu_impl_reg_offset {
+ QCOM_SMMU_TBU_PWR_STATUS,
+ QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
+ QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
+};
+
+struct qcom_smmu_config {
+ const u32 *reg_offset;
+};
+
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
+{
+ int ret;
+ u32 tbu_pwr_status, sync_inv_ack, sync_inv_progress;
+ struct qcom_smmu *qsmmu = container_of(smmu, struct qcom_smmu, smmu);
+ const struct qcom_smmu_config *cfg;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (__ratelimit(&rs)) {
+ dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
+
+ cfg = qsmmu->cfg;
+ if (!cfg)
+ return;
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS],
+ &tbu_pwr_status);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU power status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK],
+ &sync_inv_ack);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU sync/inv ack status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR],
+ &sync_inv_progress);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TCU syn/inv progress: %d\n", ret);
+
+ dev_err(smmu->dev,
+ "TBU: power_status %#x sync_inv_ack %#x sync_inv_progress %#x\n",
+ tbu_pwr_status, sync_inv_ack, sync_inv_progress);
+ }
+}
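qcom_smmu_tlb_sync_debug() gates its register dump behind __ratelimit() so a wedged SMMU cannot flood the log: at most a burst of messages per interval gets through. A rough userspace model of that interval/burst policy (the constants are illustrative, not the kernel defaults):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t begin;
	int interval;	/* seconds per window */
	int burst;	/* messages allowed per window */
	int printed;
};

static bool ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (now - rs->begin >= rs->interval) {
		rs->begin = now;	/* open a new window */
		rs->printed = 0;
	}
	return rs->printed++ < rs->burst;
}

int main(void)
{
	struct ratelimit rs = { .interval = 5, .burst = 10 };

	for (int i = 0; i < 100; i++)
		if (ratelimit_ok(&rs))
			fprintf(stderr, "TLB sync timed out (%d)\n", i);
	return 0;	/* only the first 10 messages get through */
}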
+
+/* Implementation Defined Register Space 0 register offsets */
+static const u32 qcom_smmu_impl0_reg_offset[] = {
+ [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
+ [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
+ [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
+};
+
+static const struct qcom_smmu_config qcm2290_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7180_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7280_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8180x_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8280xp_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6125_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8150_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8250_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8450_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct of_device_id __maybe_unused qcom_smmu_impl_debug_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
+ { .compatible = "qcom,qcm2290-smmu-500", .data = &qcm2290_smmu_cfg },
+ { .compatible = "qcom,sc7180-smmu-500", .data = &sc7180_smmu_cfg },
+ { .compatible = "qcom,sc7280-smmu-500", .data = &sc7280_smmu_cfg},
+ { .compatible = "qcom,sc8180x-smmu-500", .data = &sc8180x_smmu_cfg },
+ { .compatible = "qcom,sc8280xp-smmu-500", .data = &sc8280xp_smmu_cfg },
+ { .compatible = "qcom,sdm630-smmu-v2" },
+ { .compatible = "qcom,sdm845-smmu-500" },
+ { .compatible = "qcom,sm6125-smmu-500", .data = &sm6125_smmu_cfg},
+ { .compatible = "qcom,sm6350-smmu-500", .data = &sm6350_smmu_cfg},
+ { .compatible = "qcom,sm8150-smmu-500", .data = &sm8150_smmu_cfg },
+ { .compatible = "qcom,sm8250-smmu-500", .data = &sm8250_smmu_cfg },
+ { .compatible = "qcom,sm8350-smmu-500", .data = &sm8350_smmu_cfg },
+ { .compatible = "qcom,sm8450-smmu-500", .data = &sm8450_smmu_cfg },
+ { }
+};
+
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ const struct of_device_id *match;
+ const struct device_node *np = smmu->dev->of_node;
+
+ match = of_match_node(qcom_smmu_impl_debug_match, np);
+ if (!match)
+ return NULL;
+
+ return match->data;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 7820711c4560..b2708de25ea3 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -5,23 +5,40 @@
#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
+#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
-struct qcom_smmu {
- struct arm_smmu_device smmu;
- bool bypass_quirk;
- u8 bypass_cbndx;
- u32 stall_enabled;
-};
+#define QCOM_DUMMY_VAL -1
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
}
+static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int spin_cnt, delay;
+ u32 reg;
+
+ arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ reg = arm_smmu_readl(smmu, page, status);
+ if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
+ }
+ udelay(delay);
+ }
+
+ qcom_smmu_tlb_sync_debug(smmu);
+}
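qcom_smmu_tlb_sync() polls in two tiers: a tight cpu_relax() spin for fast completions, then udelay() sleeps that double each round until TLB_LOOP_TIMEOUT, at which point the debug dump fires. A userspace sketch of the same backoff shape, with a fake status register standing in for the hardware:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define SPIN_COUNT	10	/* stands in for TLB_SPIN_COUNT */
#define LOOP_TIMEOUT	1000000	/* us, stands in for TLB_LOOP_TIMEOUT */

static int hw_busy = 3;	/* fake status register */

static bool hw_done(void)
{
	return --hw_busy <= 0;	/* pretend the hardware finishes soon */
}

int main(void)
{
	/* Tight spin first, then exponentially longer sleeps. */
	for (unsigned int delay = 1; delay < LOOP_TIMEOUT; delay *= 2) {
		for (int spin = SPIN_COUNT; spin > 0; spin--) {
			if (hw_done()) {
				puts("sync complete");
				return 0;
			}
		}
		usleep(delay);
	}
	puts("sync timed out, fall through to the debug dump");
	return 1;
}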
+
static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
u32 reg)
{
@@ -233,6 +250,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ }
@@ -374,6 +392,7 @@ static const struct arm_smmu_impl qcom_smmu_impl = {
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
@@ -382,6 +401,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
.reset = qcom_smmu500_reset,
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
@@ -398,6 +418,7 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
+ qsmmu->cfg = qcom_smmu_impl_data(smmu);
return &qsmmu->smmu;
}
@@ -413,6 +434,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm6125-smmu-500" },
{ .compatible = "qcom,sm6350-smmu-500" },
+ { .compatible = "qcom,sm6375-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
new file mode 100644
index 000000000000..99ec8f8629a0
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ARM_SMMU_QCOM_H
+#define _ARM_SMMU_QCOM_H
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+ const struct qcom_smmu_config *cfg;
+ bool bypass_quirk;
+ u8 bypass_cbndx;
+ u32 stall_enabled;
+};
+
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu);
+#else
+static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
+static inline const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _ARM_SMMU_QCOM_H */
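The header pairs real prototypes under CONFIG_ARM_SMMU_QCOM_DEBUG with static inline no-ops otherwise, so callers such as qcom_smmu_tlb_sync() never need an #ifdef of their own. The same shape, reduced to a hypothetical foo_debug.h with an invented config symbol:

/* foo_debug.h -- hypothetical example of the same stub pattern. */
#ifndef _FOO_DEBUG_H
#define _FOO_DEBUG_H

struct foo_device;

#ifdef CONFIG_FOO_DEBUG
void foo_dump_state(struct foo_device *foo);	/* real code in foo_debug.c */
#else
/* Compiles away entirely when debug support is not built in. */
static inline void foo_dump_state(struct foo_device *foo) { }
#endif

#endif /* _FOO_DEBUG_H */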
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 2ed3594f384e..dfa82df00342 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1432,27 +1432,19 @@ out_free:
static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_cfg *cfg;
- struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
int ret;
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
-
- cfg = dev_iommu_priv_get(dev);
- smmu = cfg->smmu;
-
- ret = arm_smmu_rpm_get(smmu);
+ ret = arm_smmu_rpm_get(cfg->smmu);
if (ret < 0)
return;
arm_smmu_master_free_smes(cfg, fwspec);
- arm_smmu_rpm_put(smmu);
+ arm_smmu_rpm_put(cfg->smmu);
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
- iommu_fwspec_free(dev);
}
static void arm_smmu_probe_finalize(struct device *dev)
@@ -1592,7 +1584,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
@@ -2071,10 +2062,57 @@ err_reset_platform_ops: __maybe_unused;
return err;
}
+static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+ int idx, cnt = 0;
+ u32 reg;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ /*
+ * Rather than trying to look at existing mappings that
+ * are set up by the firmware and then invalidate the ones
+ * that do not have matching RMR entries, just disable the
+ * SMMU until it gets enabled again in the reset routine.
+ */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
+ reg |= ARM_SMMU_sCR0_CLIENTPD;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ struct iommu_iort_rmr_data *rmr;
+ int i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
+ if (idx < 0)
+ continue;
+
+ if (smmu->s2crs[idx].count == 0) {
+ smmu->smrs[idx].id = rmr->sids[i];
+ smmu->smrs[idx].mask = 0;
+ smmu->smrs[idx].valid = true;
+ }
+ smmu->s2crs[idx].count++;
+ smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
+
+ cnt++;
+ }
+ }
+
+ dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
+ cnt == 1 ? "" : "s");
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
- resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
@@ -2098,7 +2136,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
- ioaddr = res->start;
+ smmu->ioaddr = res->start;
+
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
@@ -2178,7 +2217,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
- "smmu.%pa", &ioaddr);
+ "smmu.%pa", &smmu->ioaddr);
if (err) {
dev_err(dev, "Failed to register iommu in sysfs\n");
return err;
@@ -2191,6 +2230,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 2b9b42fb6f30..703fd5817ec1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -278,6 +278,7 @@ struct arm_smmu_device {
struct device *dev;
void __iomem *base;
+ phys_addr_t ioaddr;
unsigned int numpage;
unsigned int pgshift;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 4c077c38fbd6..17235116d3bb 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -532,16 +532,6 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static void qcom_iommu_release_device(struct device *dev)
-{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
-
- if (!qcom_iommu)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
@@ -591,7 +581,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
.probe_device = qcom_iommu_probe_device,
- .release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
@@ -750,9 +739,12 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
struct device_node *child;
- for_each_child_of_node(qcom_iommu->dev->of_node, child)
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
+ for_each_child_of_node(qcom_iommu->dev->of_node, child) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ of_node_put(child);
return true;
+ }
+ }
return false;
}
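The qcom_iommu_has_secure_context() fix matters because for_each_child_of_node() holds a reference on the current child and only drops it when advancing to the next one; returning out of the loop without of_node_put() leaks that reference. A runnable model of the rule with a toy refcounted node type:

#include <stdio.h>

struct node { const char *name; int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n) { n->refcount--; }

int main(void)
{
	struct node nodes[] = { {"iommu", 0}, {"iommu-sec", 0}, {"spi", 0} };

	/* Model of for_each_child_of_node(): one ref held per iteration. */
	for (int i = 0; i < 3; i++) {
		struct node *child = node_get(&nodes[i]);

		if (i > 0)
			node_put(&nodes[i - 1]);	/* ref from last round */
		if (child == &nodes[1]) {
			node_put(child);	/* the fix: drop before break */
			break;
		}
	}
	for (int i = 0; i < 3; i++)
		printf("%s refcount %d\n", nodes[i].name, nodes[i].refcount);
	return 0;	/* every refcount is back to 0 */
}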
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f90251572a5d..17dd683b2fce 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,6 +21,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
+#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -64,6 +65,7 @@ struct iommu_dma_cookie {
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
+ struct mutex mutex;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -310,6 +312,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (!domain->iova_cookie)
return -ENOMEM;
+ mutex_init(&domain->iova_cookie->mutex);
return 0;
}
@@ -385,7 +388,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
- iort_iommu_msi_get_resv_regions(dev, list);
+ iort_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -560,26 +563,33 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
/* start_pfn is always nonzero for an already-initialised domain */
+ mutex_lock(&cookie->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
- return -EFAULT;
+ ret = -EFAULT;
+ goto done_unlock;
}
- return 0;
+ ret = 0;
+ goto done_unlock;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
- return ret;
+ goto done_unlock;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
domain->type = IOMMU_DOMAIN_DMA;
- return iova_reserve_iommu_regions(dev, domain);
+ ret = iova_reserve_iommu_regions(dev, domain);
+
+done_unlock:
+ mutex_unlock(&cookie->mutex);
+ return ret;
}
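The new cookie->mutex turns iommu_dma_init_domain() into a serialized init-once path: the first caller sets up the IOVA domain, later callers only verify compatibility. A pthread sketch of that shape under simplified assumptions (one lock, two fields standing in for the iovad state):

#include <pthread.h>
#include <stdio.h>

struct domain {
	pthread_mutex_t lock;
	unsigned long start_pfn;	/* nonzero once initialised */
	unsigned long granule;
};

/* Either initialise, or check the existing setup is compatible. */
static int domain_init(struct domain *d, unsigned long pfn, unsigned long gran)
{
	int ret = 0;

	pthread_mutex_lock(&d->lock);
	if (d->start_pfn) {
		if (d->granule != gran || d->start_pfn != pfn)
			ret = -1;	/* like the -EFAULT path above */
		goto out;
	}
	d->start_pfn = pfn;
	d->granule = gran;
out:
	pthread_mutex_unlock(&d->lock);
	return ret;
}

int main(void)
{
	struct domain d = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("%d %d %d\n",
	       domain_init(&d, 1, 4096),	/* 0: first init wins */
	       domain_init(&d, 1, 4096),	/* 0: compatible re-init */
	       domain_init(&d, 2, 4096));	/* -1: mismatched range */
	return 0;
}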
/**
@@ -1053,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
+ dma_addr_t s_dma_addr = sg_dma_address(s);
unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;
- s->offset += s_iova_off;
- s->length = s_length;
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
+ if (sg_is_dma_bus_address(s)) {
+ if (i > 0)
+ cur = sg_next(cur);
+
+ sg_dma_unmark_bus_address(s);
+ sg_dma_address(cur) = s_dma_addr;
+ sg_dma_len(cur) = s_length;
+ sg_dma_mark_bus_address(cur);
+ count++;
+ cur_len = 0;
+ continue;
+ }
+
+ s->offset += s_iova_off;
+ s->length = s_length;
+
/*
* Now fill in the real DMA data. If...
* - there is a valid output segment to append to
@@ -1102,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_MAPPING_ERROR)
- s->offset += sg_dma_address(s);
- if (sg_dma_len(s))
- s->length = sg_dma_len(s);
+ if (sg_is_dma_bus_address(s)) {
+ sg_dma_unmark_bus_address(s);
+ } else {
+ if (sg_dma_address(s) != DMA_MAPPING_ERROR)
+ s->offset += sg_dma_address(s);
+ if (sg_dma_len(s))
+ s->length = sg_dma_len(s);
+ }
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
@@ -1158,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1187,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
+ if (is_pci_p2pdma_page(sg_page(s))) {
+ map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
+ switch (map) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * iommu_map_sg() will skip this segment as
+ * it is marked as a bus address;
+ * __finalise_sg() will copy the dma address
+ * into the output segment.
+ */
+ continue;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Mappings through the host bridge are
+ * mapped with regular IOVAs, so we
+ * do nothing here and continue below.
+ */
+ break;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
+ }
+ }
+
sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
s->offset -= s_iova_off;
@@ -1215,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
+ if (!iova_len)
+ return __finalise_sg(dev, sg, nents, 0);
+
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) {
ret = -ENOMEM;
@@ -1236,7 +1294,7 @@ out_free_iova:
out_restore_sg:
__invalidate_sg(sg, nents);
out:
- if (ret != -ENOMEM)
+ if (ret != -ENOMEM && ret != -EREMOTEIO)
return -EINVAL;
return ret;
}
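The map_sg() hunk classifies every P2PDMA segment three ways: bus-address segments skip IOVA allocation entirely, host-bridge traffic maps like ordinary memory, and anything unsupported fails the whole call with -EREMOTEIO. A standalone model of just that decision (enum names are stand-ins for the pci_p2pdma_map_type values):

#include <stdio.h>

#define EREMOTEIO 121	/* numeric value on Linux */

enum p2p_map { MAP_BUS_ADDR, MAP_THRU_HOST_BRIDGE, MAP_NOT_SUPPORTED };

/* Model of the per-segment switch in iommu_dma_map_sg(). */
static int classify_segment(enum p2p_map map)
{
	switch (map) {
	case MAP_BUS_ADDR:
		/* Peers talk directly; record the bus address and
		 * skip IOVA allocation for this segment. */
		return 1;
	case MAP_THRU_HOST_BRIDGE:
		/* Crosses the host bridge; map through the IOMMU
		 * with a regular IOVA like normal memory. */
		return 0;
	default:
		return -EREMOTEIO;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_segment(MAP_BUS_ADDR),
	       classify_segment(MAP_THRU_HOST_BRIDGE),
	       classify_segment(MAP_NOT_SUPPORTED));
	return 0;
}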
@@ -1244,7 +1302,7 @@ out:
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start, end;
+ dma_addr_t end = 0, start;
struct scatterlist *tmp;
int i;
@@ -1258,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
/*
* The scatterlist segments are mapped into a single
- * contiguous IOVA allocation, so this is incredibly easy.
+ * contiguous IOVA allocation; the start and end points
+ * just have to be determined.
*/
- start = sg_dma_address(sg);
- for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ for_each_sg(sg, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
+ if (sg_dma_len(tmp) == 0)
+ break;
+
+ start = sg_dma_address(tmp);
+ break;
+ }
+
+ nents -= i;
+ for_each_sg(tmp, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
if (sg_dma_len(tmp) == 0)
break;
- sg = tmp;
+
+ end = sg_dma_address(tmp) + sg_dma_len(tmp);
}
- end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(dev, start, end - start);
+
+ if (end)
+ __iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1459,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
+static size_t iommu_dma_opt_mapping_size(void)
+{
+ return iova_rcache_range();
+}
+
static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
@@ -1479,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
+ .opt_mapping_size = iommu_dma_opt_mapping_size,
};
/*
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 71f2018e23fe..8e18984a0c4f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -135,6 +135,11 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+#define CTRL_VM_ENABLE BIT(0)
+#define CTRL_VM_FAULT_MODE_STALL BIT(3)
+#define CAPA0_CAPA1_EXIST BIT(11)
+#define CAPA1_VCR_ENABLED BIT(14)
+
/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
@@ -148,29 +153,20 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
/* v1.x - v3.x registers */
-#define REG_MMU_FLUSH 0x00C
-#define REG_MMU_FLUSH_ENTRY 0x010
-#define REG_PT_BASE_ADDR 0x014
-#define REG_INT_STATUS 0x018
-#define REG_INT_CLEAR 0x01C
-
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
/* v5.x registers */
-#define REG_V5_PT_BASE_PFN 0x00C
-#define REG_V5_MMU_FLUSH_ALL 0x010
-#define REG_V5_MMU_FLUSH_ENTRY 0x014
-#define REG_V5_MMU_FLUSH_RANGE 0x018
-#define REG_V5_MMU_FLUSH_START 0x020
-#define REG_V5_MMU_FLUSH_END 0x024
-#define REG_V5_INT_STATUS 0x060
-#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
+/* v7.x registers */
+#define REG_V7_CAPA0 0x870
+#define REG_V7_CAPA1 0x874
+#define REG_V7_CTRL_VM 0x8000
+
#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
@@ -251,6 +247,21 @@ struct exynos_iommu_domain {
};
/*
+ * SysMMU version specific data. Contains the offsets of registers that exist
+ * across SysMMU variants but live at variant-specific locations.
+ */
+struct sysmmu_variant {
+ u32 pt_base; /* page table base address (physical) */
+ u32 flush_all; /* invalidate all TLB entries */
+ u32 flush_entry; /* invalidate specific TLB entry */
+ u32 flush_range; /* invalidate TLB entries in specified range */
+ u32 flush_start; /* start address of range invalidation */
+ u32 flush_end; /* end address of range invalidation */
+ u32 int_status; /* interrupt status information */
+ u32 int_clear; /* clear the interrupt */
+};
+
+/*
* This structure holds all data of a single SYSMMU controller; this includes
* hw resources like registers and clocks, pointers and list nodes to connect
* it to all other structures, internal state and parameters read from device
@@ -274,6 +285,45 @@ struct sysmmu_drvdata {
unsigned int version; /* our version */
struct iommu_device iommu; /* IOMMU core handle */
+ const struct sysmmu_variant *variant; /* version specific data */
+
+ /* v7 fields */
+ bool has_vcr; /* virtual machine control register */
+};
+
+#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
+
+/* SysMMU v1..v3 */
+static const struct sysmmu_variant sysmmu_v1_variant = {
+ .flush_all = 0x0c,
+ .flush_entry = 0x10,
+ .pt_base = 0x14,
+ .int_status = 0x18,
+ .int_clear = 0x1c,
+};
+
+/* SysMMU v5 and v7 (non-VM capable) */
+static const struct sysmmu_variant sysmmu_v5_variant = {
+ .pt_base = 0x0c,
+ .flush_all = 0x10,
+ .flush_entry = 0x14,
+ .flush_range = 0x18,
+ .flush_start = 0x20,
+ .flush_end = 0x24,
+ .int_status = 0x60,
+ .int_clear = 0x64,
+};
+
+/* SysMMU v7: VM capable register set */
+static const struct sysmmu_variant sysmmu_v7_vm_variant = {
+ .pt_base = 0x800c,
+ .flush_all = 0x8010,
+ .flush_entry = 0x8014,
+ .flush_range = 0x8018,
+ .flush_start = 0x8020,
+ .flush_end = 0x8024,
+ .int_status = 0x60,
+ .int_clear = 0x64,
};
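SYSMMU_REG() resolves a logical register name through the per-variant offset table, so one code path can drive the v1, v5 and v7 layouts. A userspace model with a byte array standing in for the ioremapped MMIO window (offsets match the v1/v5 tables above; the rest is invented):

#include <stdint.h>
#include <stdio.h>

struct variant { uint32_t flush_all, int_status; };	/* offsets */

static const struct variant v1 = { .flush_all = 0x0c, .int_status = 0x18 };
static const struct variant v5 = { .flush_all = 0x10, .int_status = 0x60 };

struct drvdata {
	uint8_t *sfrbase;	/* stands in for the MMIO base */
	const struct variant *variant;
};

#define SYSMMU_REG(d, reg) ((d)->sfrbase + (d)->variant->reg)

static void flush_all(struct drvdata *d)
{
	*(volatile uint32_t *)SYSMMU_REG(d, flush_all) = 0x1;
}

int main(void)
{
	static uint8_t mmio[0x100];
	struct drvdata d = { .sfrbase = mmio, .variant = &v5 };

	flush_all(&d);	/* hits offset 0x10 on v5, would be 0x0c on v1 */
	printf("mmio[0x10] = %u\n", mmio[0x10]);	/* 1 on little-endian */
	return 0;
}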
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -304,10 +354,7 @@ static bool sysmmu_block(struct sysmmu_drvdata *data)
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
- if (MMU_MAJ_VER(data->version) < 5)
- writel(0x1, data->sfrbase + REG_MMU_FLUSH);
- else
- writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
+ writel(0x1, SYSMMU_REG(data, flush_all));
}
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -315,34 +362,30 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
{
unsigned int i;
- if (MMU_MAJ_VER(data->version) < 5) {
+ if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
for (i = 0; i < num_inv; i++) {
writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_MMU_FLUSH_ENTRY);
+ SYSMMU_REG(data, flush_entry));
iova += SPAGE_SIZE;
}
} else {
- if (num_inv == 1) {
- writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
- } else {
- writel((iova & SPAGE_MASK),
- data->sfrbase + REG_V5_MMU_FLUSH_START);
- writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
- data->sfrbase + REG_V5_MMU_FLUSH_END);
- writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
- }
+ writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
+ writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
+ SYSMMU_REG(data, flush_end));
+ writel(0x1, SYSMMU_REG(data, flush_range));
}
}
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
+ u32 pt_base;
+
if (MMU_MAJ_VER(data->version) < 5)
- writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
+ pt_base = pgd;
else
- writel(pgd >> PAGE_SHIFT,
- data->sfrbase + REG_V5_PT_BASE_PFN);
+ pt_base = pgd >> SPAGE_ORDER;
+ writel(pt_base, SYSMMU_REG(data, pt_base));
__sysmmu_tlb_invalidate(data);
}
@@ -362,6 +405,20 @@ static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
clk_disable_unprepare(data->clk_master);
}
+static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
+{
+ u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);
+
+ return capa0 & CAPA0_CAPA1_EXIST;
+}
+
+static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
+{
+ u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);
+
+ data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
+}
+
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
u32 ver;
@@ -379,6 +436,19 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data)
dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
+ if (MMU_MAJ_VER(data->version) < 5) {
+ data->variant = &sysmmu_v1_variant;
+ } else if (MMU_MAJ_VER(data->version) < 7) {
+ data->variant = &sysmmu_v5_variant;
+ } else {
+ if (__sysmmu_has_capa1(data))
+ __sysmmu_get_vcr(data);
+ if (data->has_vcr)
+ data->variant = &sysmmu_v7_vm_variant;
+ else
+ data->variant = &sysmmu_v5_variant;
+ }
+
__sysmmu_disable_clocks(data);
}
@@ -406,19 +476,14 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
const struct sysmmu_fault_info *finfo;
unsigned int i, n, itype;
sysmmu_iova_t fault_addr;
- unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
WARN_ON(!data->active);
if (MMU_MAJ_VER(data->version) < 5) {
- reg_status = REG_INT_STATUS;
- reg_clear = REG_INT_CLEAR;
finfo = sysmmu_faults;
n = ARRAY_SIZE(sysmmu_faults);
} else {
- reg_status = REG_V5_INT_STATUS;
- reg_clear = REG_V5_INT_CLEAR;
finfo = sysmmu_v5_faults;
n = ARRAY_SIZE(sysmmu_v5_faults);
}
@@ -427,7 +492,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
clk_enable(data->clk_master);
- itype = __ffs(readl(data->sfrbase + reg_status));
+ itype = __ffs(readl(SYSMMU_REG(data, int_status)));
for (i = 0; i < n; i++, finfo++)
if (finfo->bit == itype)
break;
@@ -444,7 +509,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
/* fault is not recovered by fault handler */
BUG_ON(ret != 0);
- writel(1 << itype, data->sfrbase + reg_clear);
+ writel(1 << itype, SYSMMU_REG(data, int_clear));
sysmmu_unblock(data);
@@ -486,6 +551,18 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
writel(cfg, data->sfrbase + REG_MMU_CFG);
}
+static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
+{
+ u32 ctrl;
+
+ if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
+ return;
+
+ ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
+ ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
+ writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
+}
+
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
unsigned long flags;
@@ -496,6 +573,7 @@ static void __sysmmu_enable(struct sysmmu_drvdata *data)
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
__sysmmu_init_config(data);
__sysmmu_set_ptbase(data, data->pgtable);
+ __sysmmu_enable_vid(data);
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
data->active = true;
spin_unlock_irqrestore(&data->lock, flags);
@@ -551,7 +629,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
* 64KB page can be one of 16 consecutive sets.
*/
if (MMU_MAJ_VER(data->version) == 2)
- num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+ num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);
if (sysmmu_block(data)) {
__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
@@ -623,6 +701,8 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
data->sysmmu = dev;
spin_lock_init(&data->lock);
+ __sysmmu_get_version(data);
+
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(data->sysmmu));
if (ret)
@@ -630,11 +710,10 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
if (ret)
- return ret;
+ goto err_iommu_register;
platform_set_drvdata(pdev, data);
- __sysmmu_get_version(data);
if (PG_ENT_SHIFT < 0) {
if (MMU_MAJ_VER(data->version) < 5) {
PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
@@ -647,6 +726,14 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
}
}
+ if (MMU_MAJ_VER(data->version) >= 5) {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+ if (ret) {
+ dev_err(dev, "Unable to set DMA mask: %d\n", ret);
+ goto err_dma_set_mask;
+ }
+ }
+
/*
* use the first registered sysmmu device for performing
* dma mapping operations on iommu page tables (cpu cache flush)
@@ -657,6 +744,12 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
+
+err_dma_set_mask:
+ iommu_device_unregister(&data->iommu);
+err_iommu_register:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
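The probe rework above adopts the usual kernel unwinding discipline: each failure jumps to a label that tears down exactly the steps that already succeeded, in reverse order. A compact runnable illustration of the shape (step names are invented):

#include <stdio.h>

static int step(const char *what, int fail)
{
	printf("do %s\n", what);
	return fail ? -1 : 0;
}

static void undo(const char *what) { printf("undo %s\n", what); }

static int probe(int fail_at)
{
	int ret;

	ret = step("sysfs_add", fail_at == 1);
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = step("iommu_register", fail_at == 2);
	if (ret)
		goto err_sysfs;

	ret = step("set_dma_mask", fail_at == 3);
	if (ret)
		goto err_register;

	return 0;

err_register:
	undo("iommu_register");
err_sysfs:
	undo("sysfs_add");
	return ret;
}

int main(void)
{
	probe(3);	/* last step fails; both earlier steps get undone */
	return 0;
}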
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
@@ -1251,9 +1344,6 @@ static void exynos_iommu_release_device(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
- if (!has_sysmmu(dev))
- return;
-
if (owner->domain) {
struct iommu_group *group = iommu_group_get(dev);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 94b4589dc67c..011f9ab7f743 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -447,15 +447,10 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
return &pamu_iommu;
}
-static void fsl_pamu_release_device(struct device *dev)
-{
-}
-
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.probe_device = fsl_pamu_probe_device,
- .release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 51bd66a45a11..e190bb8c225c 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -68,7 +68,6 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
- struct irq_desc *desc;
int ret = 0;
if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
@@ -90,8 +89,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
* Hypver-V IO APIC irq affinity should be in the scope of
* ioapic_max_cpumask because no irq remapping support.
*/
- desc = irq_data_to_desc(irq_data);
- cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+ irq_data_update_affinity(irq_data, &ioapic_max_cpumask);
return 0;
}
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index 71596fc62822..3ee68393122f 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -10,7 +10,7 @@
#define pr_fmt(fmt) "DMAR: " fmt
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "cap_audit.h"
static u64 intel_iommu_cap_sanity;
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index ed796eea4581..1f925285104e 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -10,11 +10,11 @@
#include <linux/debugfs.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/pci.h>
#include <asm/irq_remapping.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
@@ -263,10 +263,9 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
- unsigned long flags;
u16 bus;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
(u64)virt_to_phys(iommu->root_entry));
seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
@@ -278,8 +277,7 @@ static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
*/
for (bus = 0; bus < 256; bus++)
ctx_tbl_walk(m, iommu, bus);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
}
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
@@ -342,13 +340,13 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
}
}
-static int show_device_domain_translation(struct device *dev, void *data)
+static int __show_device_domain_translation(struct device *dev, void *data)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
+ struct dmar_domain *domain;
struct seq_file *m = data;
u64 path[6] = { 0 };
+ domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
if (!domain)
return 0;
@@ -359,20 +357,39 @@ static int show_device_domain_translation(struct device *dev, void *data)
pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
seq_putc(m, '\n');
- return 0;
+ /* Don't iterate */
+ return 1;
}
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
+static int show_device_domain_translation(struct device *dev, void *data)
{
- unsigned long flags;
- int ret;
+ struct iommu_group *group;
- spin_lock_irqsave(&device_domain_lock, flags);
- ret = bus_for_each_dev(&pci_bus_type, NULL, m,
- show_device_domain_translation);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ group = iommu_group_get(dev);
+ if (group) {
+ /*
+ * The group->mutex is held across the callback, which will
+ * block calls to iommu_attach/detach_group/device. Hence,
+ * the domain of the device will not change during traversal.
+ *
+ * All devices in an iommu group share a single domain, hence
+ * we only dump the domain of the first device. Even though,
+ * this code still possibly races with the iommu_unmap()
+ * interface. This could be solved by RCU-freeing the page
+ * table pages in the iommu_unmap() path.
+ */
+ iommu_group_for_each_dev(group, data,
+ __show_device_domain_translation);
+ iommu_group_put(group);
+ }
- return ret;
+ return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+ return bus_for_each_dev(&pci_bus_type, NULL, m,
+ show_device_domain_translation);
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9699ca101c62..5a8f780e7ffd 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
-#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -30,10 +29,11 @@
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "perf.h"
+#include "trace.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
@@ -60,7 +60,7 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
-static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
+static DEFINE_IDA(dmar_seq_ids);
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
@@ -494,7 +494,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
if (drhd->reg_base_addr == rhsa->base_address) {
int node = pxm_to_node(rhsa->proximity_domain);
- if (!node_online(node))
+ if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
@@ -1023,28 +1023,6 @@ out:
return err;
}
-static int dmar_alloc_seq_id(struct intel_iommu *iommu)
-{
- iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
- DMAR_UNITS_SUPPORTED);
- if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
- iommu->seq_id = -1;
- } else {
- set_bit(iommu->seq_id, dmar_seq_ids);
- sprintf(iommu->name, "dmar%d", iommu->seq_id);
- }
-
- return iommu->seq_id;
-}
-
-static void dmar_free_seq_id(struct intel_iommu *iommu)
-{
- if (iommu->seq_id >= 0) {
- clear_bit(iommu->seq_id, dmar_seq_ids);
- iommu->seq_id = -1;
- }
-}
-
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
@@ -1062,11 +1040,14 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
- if (dmar_alloc_seq_id(iommu) < 0) {
+ iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
+ DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
+ if (iommu->seq_id < 0) {
pr_err("Failed to allocate seq_id\n");
- err = -ENOSPC;
+ err = iommu->seq_id;
goto error;
}
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
@@ -1150,7 +1131,7 @@ err_sysfs:
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
error:
kfree(iommu);
return err;
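The dmar_seq_ids conversion swaps a hand-rolled bitmap for an IDA, which hands out the lowest free ID in a range and reports exhaustion through its return value. A kernel-context sketch of the calls involved (not standalone code; the range and names are illustrative):

#include <linux/idr.h>

static DEFINE_IDA(example_seq_ids);

/* Allocate the lowest free sequence number, as alloc_iommu() now does. */
static int example_alloc_seq(void)
{
	int id = ida_alloc_range(&example_seq_ids, 0, 63, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOSPC once the range is exhausted */
	/* ... use the id; release later with ida_free(&example_seq_ids, id) */
	return id;
}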
@@ -1183,7 +1164,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5c0dce78586a..7cca030a508e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -17,7 +17,6 @@
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dmi.h>
-#include <linux/intel-iommu.h>
#include <linux/intel-svm.h>
#include <linux/memory.h>
#include <linux/pci.h>
@@ -26,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "pasid.h"
@@ -126,13 +126,8 @@ static inline unsigned long virt_to_dma_pfn(void *p)
return page_to_dma_pfn(virt_to_page(p));
}
-/* global iommu list, set NULL for ignored DMAR units */
-static struct intel_iommu **g_iommus;
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn);
/*
* set to 1 to panic kernel if can't successfully enable VT-d
@@ -256,10 +251,6 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-#define for_each_domain_iommu(idx, domain) \
- for (idx = 0; idx < g_num_of_iommus; idx++) \
- if (domain->iommu_refcnt[idx])
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -293,12 +284,7 @@ static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-/* bitmap for indexing intel_iommus */
-static int g_num_of_iommus;
-
-static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
-static void __dmar_remove_one_dev_info(struct device_domain_info *info);
int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@@ -314,12 +300,6 @@ static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-int intel_iommu_gfx_mapped;
-EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-
-DEFINE_SPINLOCK(device_domain_lock);
-static LIST_HEAD(device_domain_list);
-
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -455,24 +435,6 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
-/* This functionin only returns single iommu in a domain */
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
-{
- int iommu_id;
-
- /* si_domain and vm domain should not get here. */
- if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
- return NULL;
-
- for_each_domain_iommu(iommu_id, domain)
- break;
-
- if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
- return NULL;
-
- return g_iommus[iommu_id];
-}
-
static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
@@ -481,16 +443,16 @@ static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
+ struct iommu_domain_info *info;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool found = false;
- int i;
+ unsigned long i;
domain->iommu_coherency = true;
-
- for_each_domain_iommu(i, domain) {
+ xa_for_each(&domain->iommu_array, i, info) {
found = true;
- if (!iommu_paging_structure_coherency(g_iommus[i])) {
+ if (!iommu_paging_structure_coherency(info->iommu)) {
domain->iommu_coherency = false;
break;
}
@@ -544,15 +506,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
- assert_spin_locked(&device_domain_lock);
-
- if (list_empty(&domain->devices))
- return NUMA_NO_NODE;
-
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
- if (!info->dev)
- continue;
-
/*
* There could possibly be multiple device numa nodes as devices
* within the same domain may sit behind different IOMMUs. There
@@ -563,6 +518,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
+ spin_unlock(&domain->lock);
return nid;
}
@@ -804,26 +760,23 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct context_entry *context;
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (context)
ret = context_present(context);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return ret;
}
static void free_context_table(struct intel_iommu *iommu)
{
- int i;
- unsigned long flags;
struct context_entry *context;
+ int i;
+
+ if (!iommu->root_entry)
+ return;
- spin_lock_irqsave(&iommu->lock, flags);
- if (!iommu->root_entry) {
- goto out;
- }
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
@@ -835,12 +788,10 @@ static void free_context_table(struct intel_iommu *iommu)
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
free_pgtable_page(context);
-
}
+
free_pgtable_page(iommu->root_entry);
iommu->root_entry = NULL;
-out:
- spin_unlock_irqrestore(&iommu->lock, flags);
}
#ifdef CONFIG_DMAR_DEBUG
@@ -849,9 +800,14 @@ static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u
struct device_domain_info *info;
struct dma_pte *parent, *pte;
struct dmar_domain *domain;
+ struct pci_dev *pdev;
int offset, level;
- info = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+ pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
+ if (!pdev)
+ return;
+
+ info = dev_iommu_priv_get(&pdev->dev);
if (!info || !info->domain) {
pr_info("device [%02x:%02x.%d] not probed\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
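
The pgtable_walk() debug hunk above resolves the faulting device by its
PCI address now that the global device list is gone. A hedged sketch of
that lookup (example_lookup is hypothetical; this version drops the
reference with pci_dev_put() immediately, while a caller that keeps
using the device would hold it longer):

	static struct device_domain_info *
	example_lookup(int segment, u8 bus, u8 devfn)
	{
		struct device_domain_info *info = NULL;
		struct pci_dev *pdev;

		/* Takes a reference on the pci_dev when found. */
		pdev = pci_get_domain_bus_and_slot(segment, bus, devfn);
		if (pdev) {
			info = dev_iommu_priv_get(&pdev->dev);
			pci_dev_put(pdev);
		}

		return info;
	}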
@@ -1234,7 +1190,6 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) {
@@ -1244,10 +1199,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
}
__iommu_flush_cache(iommu, root, ROOT_SIZE);
-
- spin_lock_irqsave(&iommu->lock, flags);
iommu->root_entry = root;
- spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
@@ -1389,23 +1341,23 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
}
static struct device_domain_info *
-iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+ u8 bus, u8 devfn)
{
struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
-
if (!iommu->qi)
return NULL;
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock(&domain->lock);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- if (info->ats_supported && info->dev)
- return info;
- break;
+ spin_unlock(&domain->lock);
+ return info->ats_supported ? info : NULL;
}
+ }
+ spin_unlock(&domain->lock);
return NULL;
}
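
This and the following hunks move walks of domain->devices from the
global device_domain_lock to the per-domain spinlock introduced later
in this patch. The resulting pattern, reduced to a sketch
(example_domain_has_device is hypothetical):

	static bool example_domain_has_device(struct dmar_domain *domain,
					      struct device *dev)
	{
		struct device_domain_info *info;
		bool found = false;

		/* domain->lock guards the domain->devices list. */
		spin_lock(&domain->lock);
		list_for_each_entry(info, &domain->devices, link) {
			if (info->dev == dev) {
				found = true;
				break;
			}
		}
		spin_unlock(&domain->lock);

		return found;
	}

The plain spin_lock()/spin_unlock() pairs rely on the assumption that
these list walks no longer happen in interrupt context, which is why
the _irqsave variants disappear throughout the file.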
@@ -1415,23 +1367,21 @@ static void domain_update_iotlb(struct dmar_domain *domain)
struct device_domain_info *info;
bool has_iotlb_device = false;
- assert_spin_locked(&device_domain_lock);
-
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock(&domain->lock);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
-
+ }
domain->has_iotlb_device = has_iotlb_device;
+ spin_unlock(&domain->lock);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!info || !dev_is_pci(info->dev))
return;
@@ -1477,8 +1427,6 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!dev_is_pci(info->dev))
return;
@@ -1518,17 +1466,15 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- unsigned long flags;
struct device_domain_info *info;
if (!domain->has_iotlb_device)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&domain->lock);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1539,7 +1485,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
unsigned int aligned_pages = __roundup_pow_of_two(pages);
unsigned int mask = ilog2(aligned_pages);
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- u16 did = domain->iommu_did[iommu->seq_id];
+ u16 did = domain_id_iommu(domain, iommu);
BUG_ON(pages == 0);
@@ -1609,11 +1555,12 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- int idx;
+ struct iommu_domain_info *info;
+ unsigned long idx;
- for_each_domain_iommu(idx, dmar_domain) {
- struct intel_iommu *iommu = g_iommus[idx];
- u16 did = dmar_domain->iommu_did[iommu->seq_id];
+ xa_for_each(&dmar_domain->iommu_array, idx, info) {
+ struct intel_iommu *iommu = info->iommu;
+ u16 did = domain_id_iommu(dmar_domain, iommu);
if (domain_use_first_level(dmar_domain))
qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
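
Flush paths such as intel_flush_iotlb_all() above now reach each
attached IOMMU through the domain's xarray, keyed by iommu->seq_id,
instead of indexing the removed g_iommus[] array. A sketch of the
traversal (example_for_each is illustrative):

	static void example_for_each(struct dmar_domain *domain)
	{
		struct iommu_domain_info *info;
		unsigned long idx;

		/* Visits every present entry; idx receives the seq_id. */
		xa_for_each(&domain->iommu_array, idx, info)
			pr_debug("domain id %u on %s\n",
				 info->did, info->iommu->name);
	}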
@@ -1719,23 +1666,16 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
if (!iommu->domain_ids)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- if (info->iommu != iommu)
- continue;
-
- if (!info->dev || !info->domain)
- continue;
-
- __dmar_remove_one_dev_info(info);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ /*
+ * All iommu domains must have been detached from the devices,
+ * hence there should be no domain IDs in use.
+ */
+ if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
+ > NUM_RESERVED_DID))
+ return;
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
@@ -1748,8 +1688,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
- g_iommus[iommu->seq_id] = NULL;
-
/* free context mapping */
free_context_table(iommu);
@@ -1795,55 +1733,77 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ spin_lock_init(&domain->lock);
+ xa_init(&domain->iommu_array);
return domain;
}
-/* Must be called with iommu->lock */
static int domain_attach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
+ struct iommu_domain_info *info, *curr;
unsigned long ndomains;
- int num;
+ int num, ret = -ENOSPC;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- domain->iommu_refcnt[iommu->seq_id] += 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 1) {
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ spin_lock(&iommu->lock);
+ curr = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (curr) {
+ curr->refcnt++;
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return 0;
+ }
- if (num >= ndomains) {
- pr_err("%s: No free domain ids\n", iommu->name);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- return -ENOSPC;
- }
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ if (num >= ndomains) {
+ pr_err("%s: No free domain ids\n", iommu->name);
+ goto err_unlock;
+ }
- set_bit(num, iommu->domain_ids);
- domain->iommu_did[iommu->seq_id] = num;
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
+ set_bit(num, iommu->domain_ids);
+ info->refcnt = 1;
+ info->did = num;
+ info->iommu = iommu;
+ curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
+ NULL, info, GFP_ATOMIC);
+ if (curr) {
+ ret = xa_err(curr) ? : -EBUSY;
+ goto err_clear;
}
+ domain_update_iommu_cap(domain);
+ spin_unlock(&iommu->lock);
return 0;
+
+err_clear:
+ clear_bit(info->did, iommu->domain_ids);
+err_unlock:
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return ret;
}
static void domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num;
+ struct iommu_domain_info *info;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
-
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 0) {
- num = domain->iommu_did[iommu->seq_id];
- clear_bit(num, iommu->domain_ids);
+ spin_lock(&iommu->lock);
+ info = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (--info->refcnt == 0) {
+ clear_bit(info->did, iommu->domain_ids);
+ xa_erase(&domain->iommu_array, iommu->seq_id);
+ domain->nid = NUMA_NO_NODE;
domain_update_iommu_cap(domain);
- domain->iommu_did[iommu->seq_id] = 0;
+ kfree(info);
}
+ spin_unlock(&iommu->lock);
}
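
domain_attach_iommu() above allocates its iommu_domain_info outside any
lock and publishes it with xa_cmpxchg(), so racing attaches either bump
the winner's refcount or back out. The insert-once idiom, reduced to a
hedged sketch (example_attach_once is hypothetical; the patch itself
passes GFP_ATOMIC because it holds iommu->lock at that point):

	static int example_attach_once(struct xarray *xa, unsigned long id,
				       void *new)
	{
		/* Stores 'new' only if the slot is still empty; a
		 * concurrent winner's entry comes back in 'curr'. */
		void *curr = xa_cmpxchg(xa, id, NULL, new, GFP_KERNEL);

		if (xa_is_err(curr))
			return xa_err(curr);

		return curr ? -EBUSY : 0;
	}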
static inline int guestwidth_to_adjustwidth(int gaw)
@@ -1862,10 +1822,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
static void domain_exit(struct dmar_domain *domain)
{
-
- /* Remove associated devices and clear attached or cached domains */
- domain_remove_dev_info(domain);
-
if (domain->pgd) {
LIST_HEAD(freelist);
@@ -1873,6 +1829,9 @@ static void domain_exit(struct dmar_domain *domain)
put_pages_list(&freelist);
}
+ if (WARN_ON(!list_empty(&domain->devices)))
+ return;
+
kfree(domain);
}
@@ -1930,11 +1889,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct pasid_table *table,
u8 bus, u8 devfn)
{
- u16 did = domain->iommu_did[iommu->seq_id];
+ struct device_domain_info *info =
+ iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+ u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
- struct device_domain_info *info = NULL;
struct context_entry *context;
- unsigned long flags;
int ret;
WARN_ON(did == 0);
@@ -1947,9 +1906,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
BUG_ON(!domain->pgd);
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
if (!context)
@@ -2000,7 +1957,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* Setup the Device-TLB enable bit and Page request
* Enable bit:
*/
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
context_set_sm_dte(context);
if (info && info->pri_supported)
@@ -2023,7 +1979,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
}
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
translation = CONTEXT_TT_DEV_IOTLB;
else
@@ -2069,7 +2024,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
out_unlock:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -2186,8 +2140,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long end_pfn, int level)
{
unsigned long lvl_pages = lvl_to_nr_pages(level);
+ struct iommu_domain_info *info;
struct dma_pte *pte = NULL;
- int i;
+ unsigned long i;
while (start_pfn <= end_pfn) {
if (!pte)
@@ -2198,8 +2153,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
start_pfn + lvl_pages - 1,
level + 1);
- for_each_domain_iommu(i, domain)
- iommu_flush_iotlb_psi(g_iommus[i], domain,
+ xa_for_each(&domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
}
@@ -2313,16 +2268,15 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
- unsigned long flags;
u16 did_old;
if (!iommu)
return;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return;
}
@@ -2330,14 +2284,14 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
if (hw_pass_through && domain_type_is_si(info->domain))
did_old = FLPT_DEFAULT_DID;
else
- did_old = info->domain->iommu_did[iommu->seq_id];
+ did_old = domain_id_iommu(info->domain, iommu);
} else {
did_old = context_domain_id(context);
}
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
iommu->flush.flush_context(iommu,
did_old,
(((u16)bus) << 8) | devfn,
@@ -2356,30 +2310,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
-static void domain_remove_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
-{
- struct device_domain_info *info;
-
- list_for_each_entry(info, &device_domain_list, global)
- if (info->segment == segment && info->bus == bus &&
- info->devfn == devfn)
- return info;
-
- return NULL;
-}
-
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
@@ -2412,7 +2342,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags |= PASID_FLAG_PAGE_SNOOP;
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
flags);
}
@@ -2499,7 +2429,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2507,17 +2436,13 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (!iommu)
return -ENODEV;
- spin_lock_irqsave(&device_domain_lock, flags);
- info->domain = domain;
- spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (ret)
return ret;
- }
+ info->domain = domain;
+ spin_lock(&domain->lock);
list_add(&info->link, &domain->devices);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&domain->lock);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2529,7 +2454,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
}
/* Setup the PASID entry for requests without PASID: */
- spin_lock_irqsave(&iommu->lock, flags);
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
@@ -2539,7 +2463,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
@@ -2807,7 +2730,6 @@ static int copy_translation_tables(struct intel_iommu *iommu)
struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
- unsigned long flags;
u64 rtaddr_reg;
int bus, ret;
bool new_ext, ext;
@@ -2850,7 +2772,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
}
}
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
/* Context tables are copied, now write them to the root_entry table */
for (bus = 0; bus < 256; bus++) {
@@ -2869,7 +2791,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
iommu->root_entry[bus].hi = val;
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
kfree(ctxt_tbls);
@@ -2968,36 +2890,6 @@ static int __init init_dmars(void)
struct intel_iommu *iommu;
int ret;
- /*
- * for each drhd
- * allocate root
- * initialize and program root entry to not present
- * endfor
- */
- for_each_drhd_unit(drhd) {
- /*
- * lock not needed as this is only incremented in the single
- * threaded kernel __init code path all other access are read
- * only
- */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
- g_num_of_iommus++;
- continue;
- }
- pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
- }
-
- /* Preallocate enough resources for IOMMU hot-addition */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
- g_num_of_iommus = DMAR_UNITS_SUPPORTED;
-
- g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
- GFP_KERNEL);
- if (!g_iommus) {
- ret = -ENOMEM;
- goto error;
- }
-
ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
if (ret)
goto free_iommu;
@@ -3020,8 +2912,6 @@ static int __init init_dmars(void)
intel_pasid_max_id);
}
- g_iommus[iommu->seq_id] = iommu;
-
intel_iommu_init_qi(iommu);
ret = iommu_init_domains(iommu);
@@ -3147,9 +3037,6 @@ free_iommu:
free_dmar_iommu(iommu);
}
- kfree(g_iommus);
-
-error:
return ret;
}
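
With g_iommus[] removed from init_dmars(), code that needs to visit
every hardware unit falls back on the DRHD list the DMAR layer already
maintains. A sketch of that iteration, using the existing
for_each_active_iommu() helper from <linux/dmar.h> (the walk is assumed
to run under rcu_read_lock() or dmar_global_lock, per the DMAR list
rules):

	static void example_visit_all(void)
	{
		struct dmar_drhd_unit *drhd;
		struct intel_iommu *iommu;

		for_each_active_iommu(iommu, drhd)
			pr_debug("%s: seq_id %d\n",
				 iommu->name, iommu->seq_id);
	}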
@@ -3530,9 +3417,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
int sp, ret;
struct intel_iommu *iommu = dmaru->iommu;
- if (g_iommus[iommu->seq_id])
- return 0;
-
ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
if (ret)
goto out;
@@ -3556,7 +3440,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
if (ret == 0)
ret = iommu_alloc_root_entry(iommu);
@@ -4022,6 +3905,20 @@ static int __init probe_acpi_namespace_devices(void)
return 0;
}
+static __init int tboot_force_iommu(void)
+{
+ if (!tboot_enabled())
+ return 0;
+
+ if (no_iommu || dmar_disabled)
+ pr_warn("Forcing Intel-IOMMU to enabled\n");
+
+ dmar_disabled = 0;
+ no_iommu = 0;
+
+ return 1;
+}
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -4093,9 +3990,6 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_satc_units))
pr_info("No SATC found\n");
- if (dmar_map_gfx)
- intel_iommu_gfx_mapped = 1;
-
init_no_remapping_devices();
ret = init_dmars();
@@ -4181,21 +4075,13 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void __dmar_remove_one_dev_info(struct device_domain_info *info)
+static void dmar_remove_one_dev_info(struct device *dev)
{
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- unsigned long flags;
-
- assert_spin_locked(&device_domain_lock);
-
- if (WARN_ON(!info))
- return;
-
- iommu = info->iommu;
- domain = info->domain;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *domain = info->domain;
+ struct intel_iommu *iommu = info->iommu;
- if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
+ if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
PASID_RID2PASID, false);
@@ -4205,23 +4091,12 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
intel_pasid_free_table(info->dev);
}
+ spin_lock(&domain->lock);
list_del(&info->link);
+ spin_unlock(&domain->lock);
- spin_lock_irqsave(&iommu->lock, flags);
domain_detach_iommu(domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dev_iommu_priv_get(dev);
- if (info)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ info->domain = NULL;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
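
The reworked dmar_remove_one_dev_info() above makes the teardown
ordering explicit: unlink under the per-domain lock first, then drop
the domain's per-iommu reference, and clear the back-pointer last. A
hypothetical condensation of that path:

	static void example_detach(struct device_domain_info *info)
	{
		struct dmar_domain *domain = info->domain;

		spin_lock(&domain->lock);
		list_del(&info->link);
		spin_unlock(&domain->lock);

		/* Drop this device's reference on the iommu before the
		 * back-pointer is cleared. */
		domain_detach_iommu(domain, info->iommu);
		info->domain = NULL;
	}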
@@ -4466,15 +4341,16 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long iova_pfn = IOVA_PFN(gather->start);
size_t size = gather->end - gather->start;
+ struct iommu_domain_info *info;
unsigned long start_pfn;
unsigned long nrpages;
- int iommu_id;
+ unsigned long i;
nrpages = aligned_nrpages(gather->start, size);
start_pfn = mm_to_dma_pfn(iova_pfn);
- for_each_domain_iommu(iommu_id, dmar_domain)
- iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, dmar_domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
@@ -4503,7 +4379,7 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
struct device_domain_info *info;
bool support = true;
- assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
if (!ecap_sc_support(info->iommu->ecap)) {
support = false;
@@ -4518,8 +4394,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
-
+ assert_spin_locked(&domain->lock);
/*
* Second level page table supports per-PTE snoop control. The
* iommu_map() interface will handle this by setting SNP bit.
@@ -4537,20 +4412,19 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&dmar_domain->lock);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&dmar_domain->lock);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&dmar_domain->lock);
return true;
}
@@ -4572,7 +4446,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
struct device_domain_info *info;
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4615,10 +4488,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
}
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->global, &device_domain_list);
dev_iommu_priv_set(dev, info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return &iommu->iommu;
}
@@ -4626,15 +4496,9 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long flags;
dmar_remove_one_dev_info(dev);
-
- spin_lock_irqsave(&device_domain_lock, flags);
dev_iommu_priv_set(dev, NULL);
- list_del(&info->global);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
kfree(info);
set_dma_ops(dev, NULL);
}
@@ -4707,7 +4571,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct context_entry *context;
struct dmar_domain *domain;
- unsigned long flags;
u64 ctx_lo;
int ret;
@@ -4715,9 +4578,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
if (!domain)
return -EINVAL;
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -EINVAL;
if (!info->pasid_supported)
goto out;
@@ -4733,7 +4594,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
context[0].lo = ctx_lo;
wmb();
iommu->flush.flush_context(iommu,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
@@ -4747,7 +4608,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
out:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -4871,13 +4731,11 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long pages = aligned_nrpages(iova, size);
unsigned long pfn = iova >> VTD_PAGE_SHIFT;
- struct intel_iommu *iommu;
- int iommu_id;
+ struct iommu_domain_info *info;
+ unsigned long i;
- for_each_domain_iommu(iommu_id, dmar_domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, dmar_domain, pfn, pages);
- }
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
}
const struct iommu_ops intel_iommu_ops = {
@@ -4887,7 +4745,6 @@ const struct iommu_ops intel_iommu_ops = {
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.device_group = intel_iommu_device_group,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
new file mode 100644
index 000000000000..fae45bbb0c7f
--- /dev/null
+++ b/drivers/iommu/intel/iommu.h
@@ -0,0 +1,839 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2006-2015, Intel Corporation.
+ *
+ * Authors: Ashok Raj <ashok.raj@intel.com>
+ * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * David Woodhouse <David.Woodhouse@intel.com>
+ */
+
+#ifndef _INTEL_IOMMU_H_
+#define _INTEL_IOMMU_H_
+
+#include <linux/types.h>
+#include <linux/iova.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/mmu_notifier.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmar.h>
+#include <linux/ioasid.h>
+#include <linux/bitfield.h>
+#include <linux/xarray.h>
+
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+
+/*
+ * VT-d hardware uses 4KiB page size regardless of host page size.
+ */
+#define VTD_PAGE_SHIFT (12)
+#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define VTD_STRIDE_SHIFT (9)
+#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
+
+#define DMA_PTE_READ BIT_ULL(0)
+#define DMA_PTE_WRITE BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
+#define DMA_PTE_SNP BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_US BIT_ULL(2)
+#define DMA_FL_PTE_ACCESS BIT_ULL(5)
+#define DMA_FL_PTE_DIRTY BIT_ULL(6)
+#define DMA_FL_PTE_XD BIT_ULL(63)
+
+#define ADDR_WIDTH_5LEVEL (57)
+#define ADDR_WIDTH_4LEVEL (48)
+
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+#define CONTEXT_PASIDE BIT_ULL(3)
+
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
+#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
+#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
+#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
+#define DMAR_GCMD_REG 0x18 /* Global command register */
+#define DMAR_GSTS_REG 0x1c /* Global status register */
+#define DMAR_RTADDR_REG 0x20 /* Root entry table */
+#define DMAR_CCMD_REG 0x28 /* Context command reg */
+#define DMAR_FSTS_REG 0x34 /* Fault Status register */
+#define DMAR_FECTL_REG 0x38 /* Fault control register */
+#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
+#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
+#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
+#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
+#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
+#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
+#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
+#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
+#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
+#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
+#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
+#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
+#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
+#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
+#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
+#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
+#define DMAR_PRS_REG 0xdc /* Page request status register */
+#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
+#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
+#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
+#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
+#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
+#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
+#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
+#define DMAR_MTRR_FIX16K_80000_REG 0x128
+#define DMAR_MTRR_FIX16K_A0000_REG 0x130
+#define DMAR_MTRR_FIX4K_C0000_REG 0x138
+#define DMAR_MTRR_FIX4K_C8000_REG 0x140
+#define DMAR_MTRR_FIX4K_D0000_REG 0x148
+#define DMAR_MTRR_FIX4K_D8000_REG 0x150
+#define DMAR_MTRR_FIX4K_E0000_REG 0x158
+#define DMAR_MTRR_FIX4K_E8000_REG 0x160
+#define DMAR_MTRR_FIX4K_F0000_REG 0x168
+#define DMAR_MTRR_FIX4K_F8000_REG 0x170
+#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
+#define DMAR_MTRR_PHYSMASK0_REG 0x188
+#define DMAR_MTRR_PHYSBASE1_REG 0x190
+#define DMAR_MTRR_PHYSMASK1_REG 0x198
+#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
+#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
+#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
+#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
+#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
+#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
+#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
+#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
+#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
+#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
+#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
+#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
+#define DMAR_MTRR_PHYSBASE8_REG 0x200
+#define DMAR_MTRR_PHYSMASK8_REG 0x208
+#define DMAR_MTRR_PHYSBASE9_REG 0x210
+#define DMAR_MTRR_PHYSMASK9_REG 0x218
+#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
+#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
+#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
+
+#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
+#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
+#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg)
+
+#define OFFSET_STRIDE (9)
+
+#define dmar_readq(a) readq(a)
+#define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
+
+#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
+#define DMAR_VER_MINOR(v) ((v) & 0x0f)
+
+/*
+ * Decoding Capability Register
+ */
+#define cap_5lp_support(c) (((c) >> 60) & 1)
+#define cap_pi_support(c) (((c) >> 59) & 1)
+#define cap_fl1gp_support(c) (((c) >> 56) & 1)
+#define cap_read_drain(c) (((c) >> 55) & 1)
+#define cap_write_drain(c) (((c) >> 54) & 1)
+#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
+#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
+#define cap_pgsel_inv(c) (((c) >> 39) & 1)
+
+#define cap_super_page_val(c) (((c) >> 34) & 0xf)
+#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
+ * OFFSET_STRIDE) + 21)
+
+#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
+#define cap_max_fault_reg_offset(c) \
+ (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
+
+#define cap_zlr(c) (((c) >> 22) & 1)
+#define cap_isoch(c) (((c) >> 23) & 1)
+#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
+#define cap_sagaw(c) (((c) >> 8) & 0x1f)
+#define cap_caching_mode(c) (((c) >> 7) & 1)
+#define cap_phmr(c) (((c) >> 6) & 1)
+#define cap_plmr(c) (((c) >> 5) & 1)
+#define cap_rwbf(c) (((c) >> 4) & 1)
+#define cap_afl(c) (((c) >> 3) & 1)
+#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
+/*
+ * Extended Capability Register
+ */
+
+#define ecap_rps(e) (((e) >> 49) & 0x1)
+#define ecap_smpwc(e) (((e) >> 48) & 0x1)
+#define ecap_flts(e) (((e) >> 47) & 0x1)
+#define ecap_slts(e) (((e) >> 46) & 0x1)
+#define ecap_slads(e) (((e) >> 45) & 0x1)
+#define ecap_vcs(e) (((e) >> 44) & 0x1)
+#define ecap_smts(e) (((e) >> 43) & 0x1)
+#define ecap_dit(e) (((e) >> 41) & 0x1)
+#define ecap_pds(e) (((e) >> 42) & 0x1)
+#define ecap_pasid(e) (((e) >> 40) & 0x1)
+#define ecap_pss(e) (((e) >> 35) & 0x1f)
+#define ecap_eafs(e) (((e) >> 34) & 0x1)
+#define ecap_nwfs(e) (((e) >> 33) & 0x1)
+#define ecap_srs(e) (((e) >> 31) & 0x1)
+#define ecap_ers(e) (((e) >> 30) & 0x1)
+#define ecap_prs(e) (((e) >> 29) & 0x1)
+#define ecap_broken_pasid(e) (((e) >> 28) & 0x1)
+#define ecap_dis(e) (((e) >> 27) & 0x1)
+#define ecap_nest(e) (((e) >> 26) & 0x1)
+#define ecap_mts(e) (((e) >> 25) & 0x1)
+#define ecap_ecs(e) (((e) >> 24) & 0x1)
+#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
+#define ecap_coherent(e) ((e) & 0x1)
+#define ecap_qis(e) ((e) & 0x2)
+#define ecap_pass_through(e) (((e) >> 6) & 0x1)
+#define ecap_eim_support(e) (((e) >> 4) & 0x1)
+#define ecap_ir_support(e) (((e) >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
+#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
+#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
+
+/* Virtual command interface capability */
+#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
+
+/* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET 60
+#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
+#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
+#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
+#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
+#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
+#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
+#define DMA_TLB_IVT (((u64)1) << 63)
+#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_TLB_MAX_SIZE (0x3f)
+
+/* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET 61
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
+#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
+#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
+#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
+#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
+#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
+#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_ID_TLB_ADDR(addr) (addr)
+#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
+
+/* PMEN_REG */
+#define DMA_PMEN_EPM (((u32)1)<<31)
+#define DMA_PMEN_PRS (((u32)1)<<0)
+
+/* GCMD_REG */
+#define DMA_GCMD_TE (((u32)1) << 31)
+#define DMA_GCMD_SRTP (((u32)1) << 30)
+#define DMA_GCMD_SFL (((u32)1) << 29)
+#define DMA_GCMD_EAFL (((u32)1) << 28)
+#define DMA_GCMD_WBF (((u32)1) << 27)
+#define DMA_GCMD_QIE (((u32)1) << 26)
+#define DMA_GCMD_SIRTP (((u32)1) << 24)
+#define DMA_GCMD_IRE (((u32) 1) << 25)
+#define DMA_GCMD_CFI (((u32) 1) << 23)
+
+/* GSTS_REG */
+#define DMA_GSTS_TES (((u32)1) << 31)
+#define DMA_GSTS_RTPS (((u32)1) << 30)
+#define DMA_GSTS_FLS (((u32)1) << 29)
+#define DMA_GSTS_AFLS (((u32)1) << 28)
+#define DMA_GSTS_WBFS (((u32)1) << 27)
+#define DMA_GSTS_QIES (((u32)1) << 26)
+#define DMA_GSTS_IRTPS (((u32)1) << 24)
+#define DMA_GSTS_IRES (((u32)1) << 25)
+#define DMA_GSTS_CFIS (((u32)1) << 23)
+
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_RTT (((u64)1) << 11)
+#define DMA_RTADDR_SMT (((u64)1) << 10)
+
+/* CCMD_REG */
+#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
+#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
+#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
+#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
+#define DMA_CCMD_MASK_NOBIT 0
+#define DMA_CCMD_MASK_1BIT 1
+#define DMA_CCMD_MASK_2BIT 2
+#define DMA_CCMD_MASK_3BIT 3
+#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
+#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
+
+/* FECTL_REG */
+#define DMA_FECTL_IM (((u32)1) << 31)
+
+/* FSTS_REG */
+#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
+#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
+#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
+#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
+#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
+#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
+#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
+
+/* FRCD_REG, 32 bits access */
+#define DMA_FRCD_F (((u32)1) << 31)
+#define dma_frcd_type(d) ((d >> 30) & 1)
+#define dma_frcd_fault_reason(c) (c & 0xff)
+#define dma_frcd_source_id(c) (c & 0xffff)
+#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
+#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+#define DMA_PRS_PRO ((u32)2)
+
+#define DMA_VCS_PAS ((u64)1)
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do { \
+ cycles_t start_time = get_cycles(); \
+ while (1) { \
+ sts = op(iommu->reg + offset); \
+ if (cond) \
+ break; \
+ if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
+ panic("DMAR hardware is malfunctioning\n"); \
+ cpu_relax(); \
+ } \
+} while (0)
+
+#define QI_LENGTH 256 /* queue length */
+
+enum {
+ QI_FREE,
+ QI_IN_USE,
+ QI_DONE,
+ QI_ABORT
+};
+
+#define QI_CC_TYPE 0x1
+#define QI_IOTLB_TYPE 0x2
+#define QI_DIOTLB_TYPE 0x3
+#define QI_IEC_TYPE 0x4
+#define QI_IWD_TYPE 0x5
+#define QI_EIOTLB_TYPE 0x6
+#define QI_PC_TYPE 0x7
+#define QI_DEIOTLB_TYPE 0x8
+#define QI_PGRP_RESP_TYPE 0x9
+#define QI_PSTRM_RESP_TYPE 0xa
+
+#define QI_IEC_SELECTIVE (((u64)1) << 4)
+#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
+#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
+
+#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
+#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
+#define QI_IWD_FENCE (((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
+
+#define QI_IOTLB_DID(did) (((u64)did) << 16)
+#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
+
+#define QI_CC_FM(fm) (((u64)fm) << 48)
+#define QI_CC_SID(sid) (((u64)sid) << 32)
+#define QI_CC_DID(did) (((u64)did) << 16)
+#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
+#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
+#define QI_PC_DID(did) (((u64)did) << 16)
+#define QI_PC_GRAN(gran) (((u64)gran) << 4)
+
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS 0
+#define QI_PC_PASID_SEL 1
+#define QI_PC_GLOBAL 3
+
+#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
+#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
+#define QI_EIOTLB_DID(did) (((u64)did) << 16)
+#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
+
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL 1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
+
+#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
+#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
+#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
+#define QI_DEV_EIOTLB_MAX_INVS 32
+
+/* Page group response descriptor QW0 */
+#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
+#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
+#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
+#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
+#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
+
+/* Page group response descriptor QW1 */
+#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
+
+#define QI_RESP_SUCCESS 0x0
+#define QI_RESP_INVALID 0x1
+#define QI_RESP_FAILURE 0xf
+
+#define QI_GRAN_NONG_PASID 2
+#define QI_GRAN_PSI_PASID 3
+
+#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
+
+struct qi_desc {
+ u64 qw0;
+ u64 qw1;
+ u64 qw2;
+ u64 qw3;
+};
+
+struct q_inval {
+ raw_spinlock_t q_lock;
+ void *desc; /* invalidation queue */
+ int *desc_status; /* desc status */
+ int free_head; /* first free entry */
+ int free_tail; /* last free entry */
+ int free_cnt;
+};
+
+struct dmar_pci_notify_info;
+
+#ifdef CONFIG_IRQ_REMAP
+/* 1MB - maximum possible interrupt remapping table size */
+#define INTR_REMAP_PAGE_ORDER 8
+#define INTR_REMAP_TABLE_REG_SIZE 0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
+
+#define INTR_REMAP_TABLE_ENTRIES 65536
+
+struct irq_domain;
+
+struct ir_table {
+ struct irte *base;
+ unsigned long *bitmap;
+};
+
+void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
+#else
+static inline void
+intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
+#endif
+
+struct iommu_flush {
+ void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+};
+
+enum {
+ SR_DMAR_FECTL_REG,
+ SR_DMAR_FEDATA_REG,
+ SR_DMAR_FEADDR_REG,
+ SR_DMAR_FEUADDR_REG,
+ MAX_SR_DMAR_REGS
+};
+
+#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+#define VTD_FLAG_SVM_CAPABLE (1 << 2)
+
+extern int intel_iommu_sm;
+
+#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
+#define pasid_supported(iommu) (sm_supported(iommu) && \
+ ecap_pasid((iommu)->ecap))
+
+struct pasid_entry;
+struct pasid_state_entry;
+struct page_req_dsc;
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: aval
+ * 8-23: domain id
+ */
+struct context_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * When VT-d works in scalable mode, it allows DMA translation to
+ * happen through either the first-level or the second-level page
+ * table. This bit marks that DMA translation for the domain goes
+ * through the first-level page table; otherwise, it goes through
+ * the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
+
+struct iommu_domain_info {
+ struct intel_iommu *iommu;
+ unsigned int refcnt; /* Refcount of devices per iommu */
+ u16 did; /* Domain ids per IOMMU. Use u16 since
+ * domain ids are 16 bit wide according
+ * to VT-d spec, section 9.3 */
+};
+
+struct dmar_domain {
+ int nid; /* node id */
+ struct xarray iommu_array; /* Attached IOMMU array */
+
+ u8 has_iotlb_device: 1;
+ u8 iommu_coherency: 1; /* indicate coherency of iommu access */
+ u8 force_snooping : 1; /* Create IOPTEs with snoop control */
+ u8 set_pte_snp:1;
+
+ spinlock_t lock; /* Protect device tracking lists */
+ struct list_head devices; /* all devices' list */
+
+ struct dma_pte *pgd; /* virtual address */
+ int gaw; /* max guest address width */
+
+ /* adjusted guest address width, 0 is level 2 30-bit */
+ int agaw;
+
+ int flags; /* flags to find out type of domain */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
+ u64 max_addr; /* maximum mapped address */
+
+ struct iommu_domain domain; /* generic domain data structure for
+ iommu core */
+};
+
+struct intel_iommu {
+ void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+ u64 reg_phys; /* physical address of hw register set */
+ u64 reg_size; /* size of hw register set */
+ u64 cap;
+ u64 ecap;
+ u64 vccap;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+ raw_spinlock_t register_lock; /* protect register handling */
+ int seq_id; /* sequence id of the iommu */
+ int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
+ unsigned int irq, pr_irq;
+ u16 segment; /* PCI segment# */
+ unsigned char name[13]; /* Device Name */
+
+#ifdef CONFIG_INTEL_IOMMU
+ unsigned long *domain_ids; /* bitmap of domains */
+ spinlock_t lock; /* protect context, domain ids */
+ struct root_entry *root_entry; /* virtual address */
+
+ struct iommu_flush flush;
+#endif
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ struct page_req_dsc *prq;
+ unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ struct completion prq_complete;
+ struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
+#endif
+ struct iopf_queue *iopf_queue;
+ unsigned char iopfq_name[16];
+ struct q_inval *qi; /* Queued invalidation info */
+ u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+
+#ifdef CONFIG_IRQ_REMAP
+ struct ir_table *ir_table; /* Interrupt remapping info */
+ struct irq_domain *ir_domain;
+ struct irq_domain *ir_msi_domain;
+#endif
+ struct iommu_device iommu; /* IOMMU core code handle */
+ int node;
+ u32 flags; /* Software defined flags */
+
+ struct dmar_drhd_unit *drhd;
+ void *perf_statistic;
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ u32 segment; /* PCI segment number */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
+ u16 pfsid; /* SRIOV physical function source ID */
+ u8 pasid_supported:3;
+ u8 pasid_enabled:1;
+ u8 pri_supported:1;
+ u8 pri_enabled:1;
+ u8 ats_supported:1;
+ u8 ats_enabled:1;
+ u8 ats_qdep;
+ struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
+ struct dmar_domain *domain; /* pointer to domain */
+ struct pasid_table *pasid_table; /* pasid table */
+};
+
+static inline void __iommu_flush_cache(
+ struct intel_iommu *iommu, void *addr, int size)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(addr, size);
+}
+
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct dmar_domain, domain);
+}
+
+/* Retrieve the domain ID which has been allocated to the domain */
+static inline u16
+domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
+{
+ struct iommu_domain_info *info =
+ xa_load(&domain->iommu_array, iommu->seq_id);
+
+ return info->did;
+}
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-10: available
+ * 11: snoop behavior
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+ u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+ pte->val = 0;
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+#ifdef CONFIG_64BIT
+ return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#else
+ /* Must have a full atomic 64-bit read */
+ return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+ VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#endif
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+ return (pte->val & 3) != 0;
+}
+
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+ return (pte->val & DMA_PTE_LARGE_PAGE);
+}
+
+static inline bool first_pte_in_page(struct dma_pte *pte)
+{
+ return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
+}
+
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
+{
+ return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+ (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
+}
+
+extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+
+extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void dmar_disable_qi(struct intel_iommu *iommu);
+extern int dmar_reenable_qi(struct intel_iommu *iommu);
+extern void qi_global_iec(struct intel_iommu *iommu);
+
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask);
+
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+ unsigned long npages, bool ih);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+ u32 pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
+ */
+#define QI_OPT_WAIT_DRAIN BIT(0)
+
+extern int dmar_ir_support(void);
+
+void *alloc_pgtable_page(int node);
+void free_pgtable_page(void *vaddr);
+void iommu_flush_write_buffer(struct intel_iommu *iommu);
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+extern void intel_svm_check(struct intel_iommu *iommu);
+extern int intel_svm_enable_prq(struct intel_iommu *iommu);
+extern int intel_svm_finish_prq(struct intel_iommu *iommu);
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
+ void *drvdata);
+void intel_svm_unbind(struct iommu_sva *handle);
+u32 intel_svm_get_pasid(struct iommu_sva *handle);
+int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
+ struct iommu_page_response *msg);
+
+struct intel_svm_dev {
+ struct list_head list;
+ struct rcu_head rcu;
+ struct device *dev;
+ struct intel_iommu *iommu;
+ struct iommu_sva sva;
+ unsigned long prq_seq_number;
+ u32 pasid;
+ int users;
+ u16 did;
+ u16 dev_iotlb:1;
+ u16 sid, qdep;
+};
+
+struct intel_svm {
+ struct mmu_notifier notifier;
+ struct mm_struct *mm;
+
+ unsigned int flags;
+ u32 pasid;
+ struct list_head devs;
+};
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+void intel_iommu_debugfs_init(void);
+#else
+static inline void intel_iommu_debugfs_init(void) {}
+#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
+
+extern const struct attribute_group *intel_iommu_groups[];
+bool context_present(struct context_entry *context);
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc);
+
+extern const struct iommu_ops intel_iommu_ops;
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
+extern int intel_iommu_enabled;
+#else
+static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
+#endif
+
+static inline const char *decode_prq_descriptor(char *str, size_t size,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3)
+{
+ char *buf = str;
+ int bytes;
+
+ bytes = snprintf(buf, size,
+ "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
+ FIELD_GET(GENMASK_ULL(31, 16), dw0),
+ FIELD_GET(GENMASK_ULL(63, 12), dw1),
+ dw1 & BIT_ULL(0) ? 'r' : '-',
+ dw1 & BIT_ULL(1) ? 'w' : '-',
+ dw0 & BIT_ULL(52) ? 'x' : '-',
+ dw0 & BIT_ULL(53) ? 'p' : '-',
+ dw1 & BIT_ULL(2) ? 'l' : '-',
+ FIELD_GET(GENMASK_ULL(51, 32), dw0),
+ FIELD_GET(GENMASK_ULL(11, 3), dw1));
+
+ /* Private Data */
+ if (dw0 & BIT_ULL(9)) {
+ size -= bytes;
+ buf += bytes;
+ snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
+ }
+
+ return str;
+}
+
+#endif
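
The new private header above carries IOMMU_WAIT_OP() over verbatim from
the old <linux/intel-iommu.h>. A hedged usage sketch, modeled on how
the driver waits for the translation-enable bit to latch
(example_wait_tes is illustrative; the real callers also hold
register_lock while polling):

	static void example_wait_tes(struct intel_iommu *iommu)
	{
		u32 sts;

		/* Spins until TES reads back set, or panics once
		 * DMAR_OPERATION_TIMEOUT cycles elapse. */
		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
			      (sts & DMA_GSTS_TES), sts);
	}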
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index a67319597884..2e9683e970f8 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -10,7 +10,6 @@
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
-#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
@@ -21,6 +20,7 @@
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "cap_audit.h"
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 17cad7c1f62d..c5e7e8b020a5 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -12,13 +12,13 @@
#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
+#include "iommu.h"
#include "pasid.h"
/*
@@ -450,17 +450,17 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
struct pasid_entry *pte;
u16 did, pgtt;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
- return;
-
- if (!pasid_pte_is_present(pte))
+ if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return;
+ }
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
-
intel_pasid_clear_entry(dev, pasid, fault_ignore);
+ spin_unlock(&iommu->lock);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -496,22 +496,6 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
}
-static inline int pasid_enable_wpe(struct pasid_entry *pte)
-{
-#ifdef CONFIG_X86
- unsigned long cr0 = read_cr0();
-
- /* CR0.WP is normally set but just to be sure */
- if (unlikely(!(cr0 & X86_CR0_WP))) {
- pr_err_ratelimited("No CPU write protect!\n");
- return -EINVAL;
- }
-#endif
- pasid_set_wpe(pte);
-
- return 0;
-};
-
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -528,39 +512,52 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
- pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
+ if (flags & PASID_FLAG_SUPERVISOR_MODE) {
+#ifdef CONFIG_X86
+ unsigned long cr0 = read_cr0();
+
+ /* CR0.WP is normally set but just to be sure */
+ if (unlikely(!(cr0 & X86_CR0_WP))) {
+ pr_err("No CPU write protect!\n");
+ return -EINVAL;
+ }
+#endif
+ if (!ecap_srs(iommu->ecap)) {
+ pr_err("No supervisor request support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ }
+
+ if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+ pr_err("No 5-level paging support for first-level on %s\n",
+ iommu->name);
return -EINVAL;
+ }
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
/* Setup the first level page table pointer: */
pasid_set_flptr(pte, (u64)__pa(pgd));
if (flags & PASID_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap)) {
- pr_err("No supervisor request support on %s\n",
- iommu->name);
- return -EINVAL;
- }
pasid_set_sre(pte);
- if (pasid_enable_wpe(pte))
- return -EINVAL;
-
+ pasid_set_wpe(pte);
}
- if (flags & PASID_FLAG_FL5LP) {
- if (cap_5lp_support(iommu->cap)) {
- pasid_set_flpm(pte, 1);
- } else {
- pr_err("No 5-level paging support for first-level\n");
- pasid_clear_entry(pte);
- return -EINVAL;
- }
- }
+ if (flags & PASID_FLAG_FL5LP)
+ pasid_set_flpm(pte, 1);
if (flags & PASID_FLAG_PAGE_SNOOP)
pasid_set_pgsnp(pte);
@@ -572,6 +569,8 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -627,17 +626,19 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
}
pgd_val = virt_to_phys(pgd);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -654,6 +655,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
if (pasid != PASID_RID2PASID)
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -669,15 +672,17 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
u16 did = FLPT_DEFAULT_DID;
struct pasid_entry *pte;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -692,6 +697,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
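
The pasid.c hunks above all follow one shape: take iommu->lock, look the entry up, bail out with -EBUSY if it is already present, program it, drop the lock, and only then flush caches. A minimal standalone sketch of that pattern, with a pthread mutex standing in for the spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int entry_present;	/* stands in for pasid_pte_is_present() */

static int setup_entry(void)
{
	pthread_mutex_lock(&lock);
	if (entry_present) {		/* raced with another setup */
		pthread_mutex_unlock(&lock);
		return -EBUSY;
	}
	entry_present = 1;		/* program the entry under the lock */
	pthread_mutex_unlock(&lock);

	/* cache flushing happens after the lock is dropped,
	 * like pasid_flush_caches() above */
	puts("entry programmed, flushing caches");
	return 0;
}

int main(void)
{
	printf("first:  %d\n", setup_entry());	/* 0 */
	printf("second: %d\n", setup_entry());	/* -EBUSY */
	return 0;
}
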
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index bf5b937848b4..20c54e50f533 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -39,6 +39,7 @@
* only and pass-through transfer modes.
*/
#define FLPT_DEFAULT_DID 1
+#define NUM_RESERVED_DID 2
/*
* The SUPERVISOR_MODE flag indicates a first level translation which
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 0e8e03252d92..94ee70ac38e3 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -9,8 +9,8 @@
*/
#include <linux/spinlock.h>
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "perf.h"
static DEFINE_SPINLOCK(latency_lock);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 7ee37d996e15..8bcfb93dda56 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -5,7 +5,6 @@
* Authors: David Woodhouse <dwmw2@infradead.org>
*/
-#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -21,11 +20,12 @@
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-sva-lib.h"
+#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
@@ -328,9 +328,9 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
unsigned int flags)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long iflags, sflags;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
+ unsigned long sflags;
int ret = 0;
svm = pasid_private_find(mm->pasid);
@@ -394,11 +394,8 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
PASID_FLAG_SUPERVISOR_MODE : 0;
sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
- spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
FLPT_DEFAULT_DID, sflags);
- spin_unlock_irqrestore(&iommu->lock, iflags);
-
if (ret)
goto free_sdev;
@@ -544,7 +541,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
domain = info->domain;
pdev = to_pci_dev(dev);
sid = PCI_DEVID(info->bus, info->devfn);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
qdep = pci_ats_queue_depth(pdev);
/*
diff --git a/drivers/iommu/intel/trace.c b/drivers/iommu/intel/trace.c
index bfb6a6e37a88..117e626e3ea9 100644
--- a/drivers/iommu/intel/trace.c
+++ b/drivers/iommu/intel/trace.c
@@ -11,4 +11,4 @@
#include <linux/types.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/intel_iommu.h>
+#include "trace.h"
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
new file mode 100644
index 000000000000..93d96f93a89b
--- /dev/null
+++ b/drivers/iommu/intel/trace.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel IOMMU trace support
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_iommu
+
+#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTEL_IOMMU_H
+
+#include <linux/tracepoint.h>
+
+#include "iommu.h"
+
+#define MSG_MAX 256
+
+TRACE_EVENT(qi_submit,
+ TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),
+
+ TP_ARGS(iommu, qw0, qw1, qw2, qw3),
+
+ TP_STRUCT__entry(
+ __field(u64, qw0)
+ __field(u64, qw1)
+ __field(u64, qw2)
+ __field(u64, qw3)
+ __string(iommu, iommu->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(iommu, iommu->name);
+ __entry->qw0 = qw0;
+ __entry->qw1 = qw1;
+ __entry->qw2 = qw2;
+ __entry->qw3 = qw3;
+ ),
+
+ TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
+ __print_symbolic(__entry->qw0 & 0xf,
+ { QI_CC_TYPE, "cc_inv" },
+ { QI_IOTLB_TYPE, "iotlb_inv" },
+ { QI_DIOTLB_TYPE, "dev_tlb_inv" },
+ { QI_IEC_TYPE, "iec_inv" },
+ { QI_IWD_TYPE, "inv_wait" },
+ { QI_EIOTLB_TYPE, "p_iotlb_inv" },
+ { QI_PC_TYPE, "pc_inv" },
+ { QI_DEIOTLB_TYPE, "p_dev_tlb_inv" },
+ { QI_PGRP_RESP_TYPE, "page_grp_resp" }),
+ __get_str(iommu),
+ __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
+ )
+);
+
+TRACE_EVENT(prq_report,
+ TP_PROTO(struct intel_iommu *iommu, struct device *dev,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3,
+ unsigned long seq),
+
+ TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),
+
+ TP_STRUCT__entry(
+ __field(u64, dw0)
+ __field(u64, dw1)
+ __field(u64, dw2)
+ __field(u64, dw3)
+ __field(unsigned long, seq)
+ __string(iommu, iommu->name)
+ __string(dev, dev_name(dev))
+ __dynamic_array(char, buff, MSG_MAX)
+ ),
+
+ TP_fast_assign(
+ __entry->dw0 = dw0;
+ __entry->dw1 = dw1;
+ __entry->dw2 = dw2;
+ __entry->dw3 = dw3;
+ __entry->seq = seq;
+ __assign_str(iommu, iommu->name);
+ __assign_str(dev, dev_name(dev));
+ ),
+
+ TP_printk("%s/%s seq# %ld: %s",
+ __get_str(iommu), __get_str(dev), __entry->seq,
+ decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
+ __entry->dw1, __entry->dw2, __entry->dw3)
+ )
+);
+#endif /* _TRACE_INTEL_IOMMU_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
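
trace.h above relies on the standard tracepoint header trick: the header body is expanded once for declarations and a second time, when trace.c defines CREATE_TRACE_POINTS before pulling in <trace/define_trace.h>, for definitions. A toy single-file sketch of that double-expansion idea, not the kernel's actual tracepoint machinery:

#include <stdio.h>

/* First pass: TRACE_EVENT() expands to a declaration. */
#define TRACE_EVENT(name) void trace_##name(void);
TRACE_EVENT(qi_submit)
#undef TRACE_EVENT

/* Second pass (what define_trace.h triggers): same invocation,
 * now expanding to a definition. */
#define TRACE_EVENT(name) \
	void trace_##name(void) { printf("%s fired\n", #name); }
TRACE_EVENT(qi_submit)
#undef TRACE_EVENT

int main(void)
{
	trace_qi_submit();
	return 0;
}
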
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index be066c1503d3..ba3115fd0f86 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -182,14 +182,8 @@ static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
(cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
}
-static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
- struct io_pgtable_cfg *cfg)
+static arm_v7s_iopte to_mtk_iopte(phys_addr_t paddr, arm_v7s_iopte pte)
{
- arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
-
- if (!arm_v7s_is_mtk_enabled(cfg))
- return pte;
-
if (paddr & BIT_ULL(32))
pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
if (paddr & BIT_ULL(33))
@@ -199,6 +193,17 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
return pte;
}
+static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
+ struct io_pgtable_cfg *cfg)
+{
+ arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
+
+ if (arm_v7s_is_mtk_enabled(cfg))
+ return to_mtk_iopte(paddr, pte);
+
+ return pte;
+}
+
static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
struct io_pgtable_cfg *cfg)
{
@@ -240,10 +245,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dma_addr_t dma;
size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
void *table = NULL;
+ gfp_t gfp_l1;
+
+ /*
+ * ARM_MTK_TTBR_EXT extends the translation table base to support a
+ * larger memory address range.
+ */

+ gfp_l1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ GFP_KERNEL : ARM_V7S_TABLE_GFP_DMA;
if (lvl == 1)
- table = (void *)__get_free_pages(
- __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
+ table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp);
@@ -251,7 +263,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
return NULL;
phys = virt_to_phys(table);
- if (phys != (arm_v7s_iopte)phys) {
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ phys >= (1ULL << cfg->oas) : phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
@@ -457,9 +470,14 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
arm_v7s_iopte curr,
struct io_pgtable_cfg *cfg)
{
+ phys_addr_t phys = virt_to_phys(table);
arm_v7s_iopte old, new;
- new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
+ new = phys | ARM_V7S_PTE_TYPE_TABLE;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT)
+ new = to_mtk_iopte(phys, new);
+
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
new |= ARM_V7S_ATTR_NS_TABLE;
@@ -779,6 +797,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
void *cookie)
{
struct arm_v7s_io_pgtable *data;
+ slab_flags_t slab_flag;
+ phys_addr_t paddr;
if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
@@ -788,7 +808,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_ARM_MTK_EXT))
+ IO_PGTABLE_QUIRK_ARM_MTK_EXT |
+ IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
@@ -796,15 +817,27 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
!(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
return NULL;
+ if ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT) &&
+ !arm_v7s_is_mtk_enabled(cfg))
+ return NULL;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
spin_lock_init(&data->split_lock);
+
+ /*
+ * ARM_MTK_TTBR_EXT extends the translation table base to support a
+ * larger memory address range.
+ */
+ slab_flag = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ 0 : ARM_V7S_TABLE_SLAB_FLAGS;
+
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SIZE(2, cfg),
- ARM_V7S_TABLE_SLAB_FLAGS, NULL);
+ slab_flag, NULL);
if (!data->l2_tables)
goto out_free_data;
@@ -850,12 +883,16 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
wmb();
/* TTBR */
- cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
- (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
- ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
- (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
+ paddr = virt_to_phys(data->pgd);
+ if (arm_v7s_is_mtk_enabled(cfg))
+ cfg->arm_v7s_cfg.ttbr = paddr | upper_32_bits(paddr);
+ else
+ cfg->arm_v7s_cfg.ttbr = paddr | ARM_V7S_TTBR_S |
+ (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
return &data->iop;
out_free_data:
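
to_mtk_iopte() above packs bits 32 and 33 of a >32-bit physical address into spare bits of the 32-bit PTE. A standalone sketch, with illustrative bit positions standing in for ARM_V7S_ATTR_MTK_PA_BIT32/33:

#include <stdint.h>
#include <stdio.h>

/* Stand-in positions; the driver's real attribute bits may differ. */
#define MTK_PA_BIT32	(1u << 9)
#define MTK_PA_BIT33	(1u << 4)

static uint32_t to_mtk_iopte(uint64_t paddr, uint32_t pte)
{
	if (paddr & (1ULL << 32))
		pte |= MTK_PA_BIT32;
	if (paddr & (1ULL << 33))
		pte |= MTK_PA_BIT33;
	return pte;
}

int main(void)
{
	uint64_t paddr = 0x2c0000000ULL;	/* a 34-bit example address */
	uint32_t pte = (uint32_t)paddr;		/* low 32 bits of the PTE */

	printf("paddr=0x%llx -> pte=0x%08x\n",
	       (unsigned long long)paddr, to_mtk_iopte(paddr, pte));
	return 0;
}
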
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 847ad47a2dfd..780fb7071577 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -259,7 +259,8 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
return 0;
out_release:
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
out_module_put:
module_put(ops->owner);
@@ -272,7 +273,7 @@ err_free:
int iommu_probe_device(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
struct iommu_group *group;
int ret;
@@ -313,6 +314,7 @@ int iommu_probe_device(struct device *dev)
mutex_unlock(&group->mutex);
iommu_group_put(group);
+ ops = dev_iommu_ops(dev);
if (ops->probe_finalize)
ops->probe_finalize(dev);
@@ -336,7 +338,8 @@ void iommu_release_device(struct device *dev)
iommu_device_unlink(dev->iommu->iommu_dev, dev);
ops = dev_iommu_ops(dev);
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
iommu_group_remove_device(dev);
module_put(ops->owner);
@@ -600,7 +603,7 @@ static void iommu_group_release(struct kobject *kobj)
if (group->iommu_data_release)
group->iommu_data_release(group->iommu_data);
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
if (group->default_domain)
iommu_domain_free(group->default_domain);
@@ -641,7 +644,7 @@ struct iommu_group *iommu_group_alloc(void)
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
- ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
kfree(group);
return ERR_PTR(ret);
@@ -651,7 +654,7 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -2457,6 +2460,9 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
len = 0;
}
+ if (sg_is_dma_bus_address(sg))
+ goto next;
+
if (len) {
len += sg->length;
} else {
@@ -2464,6 +2470,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
start = s_phys;
}
+next:
if (++i < nents)
sg = sg_next(sg);
}
@@ -2576,32 +2583,25 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
ops->get_resv_regions(dev, list);
}
-void iommu_put_resv_regions(struct device *dev, struct list_head *list)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->put_resv_regions)
- ops->put_resv_regions(dev, list);
-}
-
/**
- * generic_iommu_put_resv_regions - Reserved region driver helper
+ * iommu_put_resv_regions - release reserved regions
* @dev: device for which to free reserved regions
* @list: reserved region list for device
*
- * IOMMU drivers can use this to implement their .put_resv_regions() callback
- * for simple reservations. Memory allocated for each reserved region will be
- * freed. If an IOMMU driver allocates additional resources per region, it is
- * going to have to implement a custom callback.
+ * This releases a reserved region list acquired by iommu_get_resv_regions().
*/
-void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
struct iommu_resv_region *entry, *next;
- list_for_each_entry_safe(entry, next, list, list)
- kfree(entry);
+ list_for_each_entry_safe(entry, next, list, list) {
+ if (entry->free)
+ entry->free(dev, entry);
+ else
+ kfree(entry);
+ }
}
-EXPORT_SYMBOL(generic_iommu_put_resv_regions);
+EXPORT_SYMBOL(iommu_put_resv_regions);
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
size_t length, int prot,
@@ -2751,19 +2751,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
-
- if (ops->dev_feat_enabled)
- return ops->dev_feat_enabled(dev, feat);
- }
-
- return false;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
-
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
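
The reworked iommu_put_resv_regions() above lets a region carry its own destructor and falls back to kfree() otherwise. A minimal userspace sketch of that per-entry free-callback pattern (plain pointers instead of list_head, free() instead of kfree()):

#include <stdio.h>
#include <stdlib.h>

struct resv_region {
	struct resv_region *next;
	void (*free)(struct resv_region *);	/* optional destructor */
};

static void driver_free(struct resv_region *r)
{
	puts("driver-specific teardown");
	free(r);
}

static void put_resv_regions(struct resv_region *head)
{
	struct resv_region *r, *next;

	for (r = head; r; r = next) {
		next = r->next;
		if (r->free)
			r->free(r);	/* entry brought its own destructor */
		else
			free(r);	/* plain entry: default teardown */
	}
}

int main(void)
{
	struct resv_region *plain = calloc(1, sizeof(*plain));
	struct resv_region *fancy = calloc(1, sizeof(*fancy));

	fancy->free = driver_free;
	plain->next = fancy;
	put_resv_regions(plain);
	return 0;
}
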
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index db77aa675145..47d1983dfa2a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -26,6 +26,11 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
+unsigned long iova_rcache_range(void)
+{
+ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
struct iova_domain *iovad;
@@ -614,7 +619,12 @@ EXPORT_SYMBOL_GPL(reserve_iova);
* dynamic size tuning described in the paper.
*/
-#define IOVA_MAG_SIZE 128
+/*
+ * As kmalloc's buffer size is fixed to power of 2, 127 is chosen to
+ * assure size of 'iova_magazine' to be 1024 bytes, so that no memory
+ * will be wasted.
+ */
+#define IOVA_MAG_SIZE 127
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_magazine {
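
The new IOVA_MAG_SIZE comment is easy to check by arithmetic: assuming the magazine is one counter plus the pfn array, (1 + 127) * 8 = 1024 bytes on a 64-bit build, exactly a kmalloc power-of-2 bucket. A standalone sketch:

#include <assert.h>
#include <stdio.h>

#define IOVA_MAG_SIZE 127

/* Assumed layout: one counter plus the pfn array. */
struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

int main(void)
{
	/* (1 + 127) * 8 = 1024 with 8-byte longs: exactly the
	 * kmalloc-1024 bucket, so no slack is wasted. */
	printf("sizeof(struct iova_magazine) = %zu\n",
	       sizeof(struct iova_magazine));
	assert(sizeof(struct iova_magazine) == 1024);
	return 0;
}
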
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f09aedfdd462..6a24aa804ea3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -394,10 +394,6 @@ static struct iommu_device *msm_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void msm_iommu_release_device(struct device *dev)
-{
-}
-
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -603,7 +599,7 @@ static int insert_iommu_master(struct device *dev,
for (sid = 0; sid < master->num_mids; sid++)
if (master->mids[sid] == spec->args[0]) {
- dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
sid);
return 0;
}
@@ -677,7 +673,6 @@ fail:
static struct iommu_ops msm_iommu_ops = {
.domain_alloc = msm_iommu_domain_alloc,
.probe_device = msm_iommu_probe_device,
- .release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bb9dd92c9898..7e363b1f24df 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -34,7 +34,6 @@
#include <dt-bindings/memory/mtk-memory-port.h>
#define REG_MMU_PT_BASE_ADDR 0x000
-#define MMU_PT_ADDR_MASK GENMASK(31, 7)
#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
@@ -138,6 +137,7 @@
/* PM and clock always on. e.g. infra iommu */
#define PM_CLK_AO BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
+#define PGTABLE_PA_35_EN BIT(17)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -596,6 +596,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
.iommu_dev = data->dev,
};
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
+ dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;
+
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
dom->cfg.oas = data->enable_4GB ? 33 : 32;
else
@@ -684,8 +687,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
goto err_unlock;
}
bank->m4u_dom = dom;
- writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- bank->base + REG_MMU_PT_BASE_ADDR);
+ writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);
pm_runtime_put(m4udev);
}
@@ -819,17 +821,12 @@ static void mtk_iommu_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return;
-
data = dev_iommu_priv_get(dev);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
}
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
@@ -933,7 +930,6 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
.get_resv_regions = mtk_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -1140,22 +1136,32 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
- switch (data->plat_data->m4u_plat) {
- case M4U_MT2712:
- p = "mediatek,mt2712-infracfg";
- break;
- case M4U_MT8173:
- p = "mediatek,mt8173-infracfg";
- break;
- default:
- p = NULL;
+ infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg");
+ if (IS_ERR(infracfg)) {
+ /*
+ * Legacy devicetrees will not specify a phandle to
+ * mediatek,infracfg: in that case, we use the older
+ * way to retrieve a syscon to infra.
+ *
+ * This is for backward compatibility only, hence
+ * no more compatibles shall be added to this.
+ */
+ switch (data->plat_data->m4u_plat) {
+ case M4U_MT2712:
+ p = "mediatek,mt2712-infracfg";
+ break;
+ case M4U_MT8173:
+ p = "mediatek,mt8173-infracfg";
+ break;
+ default:
+ p = NULL;
+ }
+
+ infracfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(infracfg))
+ return PTR_ERR(infracfg);
}
- infracfg = syscon_regmap_lookup_by_compatible(p);
-
- if (IS_ERR(infracfg))
- return PTR_ERR(infracfg);
-
ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
if (ret)
return ret;
@@ -1204,18 +1210,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = mtk_iommu_mm_dts_parse(dev, &match, data);
if (ret) {
- dev_err(dev, "mm dts parse fail(%d).", ret);
+ dev_err_probe(dev, ret, "mm dts parse fail\n");
goto out_runtime_disable;
}
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- data->plat_data->pericfg_comp_str) {
- infracfg = syscon_regmap_lookup_by_compatible(data->plat_data->pericfg_comp_str);
- if (IS_ERR(infracfg)) {
- ret = PTR_ERR(infracfg);
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ p = data->plat_data->pericfg_comp_str;
+ data->pericfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(data->pericfg)) {
+ ret = PTR_ERR(data->pericfg);
goto out_runtime_disable;
}
-
- data->pericfg = infracfg;
}
platform_set_drvdata(pdev, data);
@@ -1366,8 +1370,7 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
- writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- base + REG_MMU_PT_BASE_ADDR);
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR);
} while (++i < data->plat_data->banks_num);
/*
@@ -1401,7 +1404,7 @@ static const struct mtk_iommu_plat_data mt2712_data = {
static const struct mtk_iommu_plat_data mt6779_data = {
.m4u_plat = M4U_MT6779,
.flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN |
- MTK_IOMMU_TYPE_MM,
+ MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.banks_num = 1,
.banks_enable = {true},
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index e1cb51b9866c..128c7a3f1778 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -532,15 +532,10 @@ static void mtk_iommu_v1_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
- return;
-
data = dev_iommu_priv_get(dev);
larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index bd409bab6286..511959c8a14d 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -383,16 +383,6 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static void sprd_iommu_release_device(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- if (!fwspec || fwspec->ops != &sprd_iommu_ops)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static struct iommu_group *sprd_iommu_device_group(struct device *dev)
{
struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
@@ -417,7 +407,6 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
.domain_alloc = sprd_iommu_domain_alloc,
.probe_device = sprd_iommu_probe_device,
- .release_device = sprd_iommu_release_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index c54ab477b8fd..a84c63518773 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -738,8 +738,6 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void sun50i_iommu_release_device(struct device *dev) {}
-
static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
@@ -764,7 +762,6 @@ static const struct iommu_ops sun50i_iommu_ops = {
.domain_alloc = sun50i_iommu_domain_alloc,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
- .release_device = sun50i_iommu_release_device,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index a6700a40a6f8..e5ca3cf1a949 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -246,10 +246,6 @@ static struct iommu_device *gart_iommu_probe_device(struct device *dev)
return &gart_handle->iommu;
}
-static void gart_iommu_release_device(struct device *dev)
-{
-}
-
static int gart_iommu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -273,7 +269,6 @@ static void gart_iommu_sync(struct iommu_domain *domain,
static const struct iommu_ops gart_iommu_ops = {
.domain_alloc = gart_iommu_domain_alloc,
.probe_device = gart_iommu_probe_device,
- .release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1fea68e551f1..2a8de975fe63 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -864,8 +864,6 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
return &smmu->iommu;
}
-static void tegra_smmu_release_device(struct device *dev) {}
-
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
@@ -966,7 +964,6 @@ static int tegra_smmu_of_xlate(struct device *dev,
static const struct iommu_ops tegra_smmu_ops = {
.domain_alloc = tegra_smmu_domain_alloc,
.probe_device = tegra_smmu_probe_device,
- .release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 25be4b822aa0..08eeafc9529f 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
-static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
int ret;
u32 flags;
+ size_t size = pgsize * pgcount;
u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
viommu_del_mappings(vdomain, iova, end);
+ else if (mapped)
+ *mapped = size;
return ret;
}
-static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
int ret = 0;
size_t unmapped;
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
+ size_t size = pgsize * pgcount;
unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
@@ -964,7 +970,7 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
return &viommu->iommu;
err_free_dev:
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
return ERR_PTR(ret);
@@ -981,15 +987,9 @@ static void viommu_probe_finalize(struct device *dev)
static void viommu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev;
-
- if (!fwspec || fwspec->ops != &viommu_ops)
- return;
-
- vdev = dev_iommu_priv_get(dev);
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
}
@@ -1013,13 +1013,12 @@ static struct iommu_ops viommu_ops = {
.release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = viommu_attach_dev,
- .map = viommu_map,
- .unmap = viommu_unmap,
+ .map_pages = viommu_map_pages,
+ .unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.free = viommu_domain_free,
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 327f3ab62c03..741612ba6a52 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -129,7 +129,7 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP);
- cpuintc_handle = irq_domain_alloc_fwnode(NULL);
+ cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 80d8ca6f2d46..16e9af8d8b1e 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -111,11 +111,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
/* Mask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
/* Set route for target vector */
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
/* Unmask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -286,7 +290,7 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
}
}
-struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
int i;
@@ -344,7 +348,8 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
if (!priv)
return -ENOMEM;
- priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
+ priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+ acpi_eiointc->node);
if (!priv->domain_handle) {
pr_err("Unable to allocate domain handle\n");
goto out_free_priv;
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index c4f3c886ad61..0da8716f8f24 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -207,7 +207,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
"reg-names", core_reg_names[i]);
if (index < 0)
- return -EINVAL;
+ goto out_iounmap;
priv->core_isr[i] = of_iomap(node, index);
}
@@ -360,7 +360,7 @@ int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic
parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index d0e8551bebfa..a72ede90ffc6 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -282,7 +282,7 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
int ret;
struct fwnode_handle *domain_handle;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
acpi_pchmsi->count, parent, domain_handle);
if (ret < 0)
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index b6f1392964b1..c01b9c257005 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -48,25 +48,6 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-int find_pch_pic(u32 gsi)
-{
- int i;
-
- /* Find the PCH_PIC that manages this GSI. */
- for (i = 0; i < MAX_IO_PICS; i++) {
- struct pch_pic *priv = pch_pic_priv[i];
-
- if (!priv)
- return -1;
-
- if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
- return i;
- }
-
- pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
- return -1;
-}
-
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -325,6 +306,25 @@ IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
#endif
#ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+ int i;
+
+ /* Find the PCH_PIC that manages this GSI. */
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ struct pch_pic *priv = pch_pic_priv[i];
+
+ if (!priv)
+ return -1;
+
+ if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+ return i;
+ }
+
+ pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+ return -1;
+}
+
static int __init
pch_lpc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
@@ -349,7 +349,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index b65bd8878d4f..499e5f81b3fe 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -95,10 +95,11 @@ static const struct irq_domain_ops riscv_intc_domain_ops = {
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
- int rc, hartid;
+ int rc;
+ unsigned long hartid;
- hartid = riscv_of_parent_hartid(node);
- if (hartid < 0) {
+ rc = riscv_of_parent_hartid(node, &hartid);
+ if (rc < 0) {
pr_warn("unable to find hart id for %pOF\n", node);
return 0;
}
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index ba4938188570..2f4784860df5 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -374,7 +374,8 @@ static int __init __plic_init(struct device_node *node,
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
irq_hw_number_t hwirq;
- int cpu, hartid;
+ int cpu;
+ unsigned long hartid;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -398,8 +399,8 @@ static int __init __plic_init(struct device_node *node,
continue;
}
- hartid = riscv_of_parent_hartid(parent.np);
- if (hartid < 0) {
+ error = riscv_of_parent_hartid(parent.np, &hartid);
+ if (error < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
}
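
The riscv_of_parent_hartid() conversion above moves to the usual kernel API shape for values that may not fit in a signed int: status in the return code, the payload in an unsigned long out-parameter. A standalone sketch of that shape, with strtoul standing in for the devicetree parsing:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Status in the return value, the unsigned payload via out-parameter. */
static int parse_hartid(const char *s, unsigned long *hartid)
{
	char *end;
	unsigned long v = strtoul(s, &end, 0);

	if (end == s || *end != '\0')
		return -EINVAL;

	*hartid = v;
	return 0;
}

int main(void)
{
	unsigned long hartid;

	/* 0x80000000 would read as negative if squeezed through an int. */
	if (parse_hartid("0x80000000", &hartid) == 0)
		printf("hartid = %lu\n", hartid);
	return 0;
}
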
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e1f771c72fc4..ad3e2c1b3c87 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void)
lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
/* Disable COP interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
/* Disable CPU interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
/* Enable the wakeup sources of ictlr */
writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
@@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void)
writel_relaxed(lic->cpu_iep[i],
ictlr + ICTLR_CPU_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(lic->cpu_ier[i],
ictlr + ICTLR_CPU_IER_SET);
writel_relaxed(lic->cop_iep[i],
ictlr + ICTLR_COP_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(lic->cop_ier[i],
ictlr + ICTLR_COP_IER_SET);
}
@@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node,
lic->base[i] = base;
/* Disable all interrupts */
- writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
/* All interrupts target IRQ */
writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
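
The tegra hunks replace ~0ul with GENMASK(31, 0) because the ictlr registers are 32 bits wide, while ~0ul is 64 bits of ones on a 64-bit build. A standalone sketch, with GENMASK() re-implemented for a 64-bit userspace build:

#include <stdio.h>

/* Userspace re-implementation assuming 64-bit unsigned long. */
#define GENMASK(h, l) \
	(((~0ul) - (1ul << (l)) + 1) & (~0ul >> (63 - (h))))

int main(void)
{
	printf("~0ul           = 0x%lx\n", ~0ul);	/* 64 bits of ones */
	printf("GENMASK(31, 0) = 0x%lx\n", GENMASK(31, 0)); /* 0xffffffff */
	return 0;
}
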
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a49979f41eee..499d0f215a8b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -447,16 +447,16 @@ config LEDS_LP8860
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS && BROKEN
depends on X86 && SERIO_I8042 && DMI
help
This driver makes the mail LED accessible from userspace
- programs through the leds subsystem. This LED have three
- known mode: off, blink at 0.5Hz and blink at 1Hz.
+ programs through the LEDs subsystem. This LED has three
+ known modes: off, blink at 0.5Hz and blink at 1Hz.
The driver supports two kinds of interface: using ledtrig-timer
or through /sys/class/leds/clevo::mail/brightness. As this LED
- cannot change it's brightness it blinks instead. The brightness
+ cannot change its brightness, it blinks instead. The brightness
value 0 means off, 1..127 means blink at 0.5Hz and 128..255 means
blink at 1Hz.
@@ -697,7 +697,7 @@ config LEDS_MENF21BMC
config LEDS_IS31FL319X
tristate "LED Support for ISSI IS31FL319x I2C LED controller family"
- depends on LEDS_CLASS && I2C && OF
+ depends on LEDS_CLASS && I2C
select REGMAP_I2C
help
This option enables support for LEDs connected to ISSI IS31FL319x
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 59ba81e40e85..945c84286a4e 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,3 +1,17 @@
+config LEDS_BCM63138
+ tristate "LED Support for Broadcom BCM63138 SoC"
+ depends on LEDS_CLASS
+ depends on ARCH_BCM4908 || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ default ARCH_BCM4908
+ help
+ This option enables support for the LED controller that is part of
+ the BCM63138 SoC. The same hardware block is also known to be used
+ in the BCM4908, BCM6848, BCM6858, BCM63148, BCM63381 and BCM68360.
+
+ If compiled as a module, it will be called leds-bcm63138.
+
config LEDS_LGM
tristate "LED support for LGM SoC series"
depends on X86 || COMPILE_TEST
diff --git a/drivers/leds/blink/Makefile b/drivers/leds/blink/Makefile
index fa5d04dccf13..447029f4153a 100644
--- a/drivers/leds/blink/Makefile
+++ b/drivers/leds/blink/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_BCM63138) += leds-bcm63138.o
obj-$(CONFIG_LEDS_LGM) += leds-lgm-sso.o
diff --git a/drivers/leds/blink/leds-bcm63138.c b/drivers/leds/blink/leds-bcm63138.c
new file mode 100644
index 000000000000..2cf2761e4914
--- /dev/null
+++ b/drivers/leds/blink/leds-bcm63138.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define BCM63138_MAX_LEDS 32
+#define BCM63138_MAX_BRIGHTNESS 9
+
+#define BCM63138_LED_BITS 4 /* how many bits control a single LED */
+#define BCM63138_LED_MASK ((1 << BCM63138_LED_BITS) - 1) /* 0xf */
+#define BCM63138_LEDS_PER_REG (32 / BCM63138_LED_BITS) /* 8 */
+
+#define BCM63138_GLB_CTRL 0x00
+#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL 0x00000002
+#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL 0x00000008
+#define BCM63138_MASK 0x04
+#define BCM63138_HW_LED_EN 0x08
+#define BCM63138_SERIAL_LED_SHIFT_SEL 0x0c
+#define BCM63138_FLASH_RATE_CTRL1 0x10
+#define BCM63138_FLASH_RATE_CTRL2 0x14
+#define BCM63138_FLASH_RATE_CTRL3 0x18
+#define BCM63138_FLASH_RATE_CTRL4 0x1c
+#define BCM63138_BRIGHT_CTRL1 0x20
+#define BCM63138_BRIGHT_CTRL2 0x24
+#define BCM63138_BRIGHT_CTRL3 0x28
+#define BCM63138_BRIGHT_CTRL4 0x2c
+#define BCM63138_POWER_LED_CFG 0x30
+#define BCM63138_HW_POLARITY 0xb4
+#define BCM63138_SW_DATA 0xb8
+#define BCM63138_SW_POLARITY 0xbc
+#define BCM63138_PARALLEL_LED_POLARITY 0xc0
+#define BCM63138_SERIAL_LED_POLARITY 0xc4
+#define BCM63138_HW_LED_STATUS 0xc8
+#define BCM63138_FLASH_CTRL_STATUS 0xcc
+#define BCM63138_FLASH_BRT_CTRL 0xd0
+#define BCM63138_FLASH_P_LED_OUT_STATUS 0xd4
+#define BCM63138_FLASH_S_LED_OUT_STATUS 0xd8
+
+struct bcm63138_leds {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+struct bcm63138_led {
+ struct bcm63138_leds *leds;
+ struct led_classdev cdev;
+ u32 pin;
+ bool active_low;
+};
+
+/*
+ * I/O access
+ */
+
+static void bcm63138_leds_write(struct bcm63138_leds *leds, unsigned int reg,
+ u32 data)
+{
+ writel(data, leds->base + reg);
+}
+
+static unsigned long bcm63138_leds_read(struct bcm63138_leds *leds,
+ unsigned int reg)
+{
+ return readl(leds->base + reg);
+}
+
+static void bcm63138_leds_update_bits(struct bcm63138_leds *leds,
+ unsigned int reg, u32 mask, u32 val)
+{
+ WARN_ON(val & ~mask);
+
+ bcm63138_leds_write(leds, reg, (bcm63138_leds_read(leds, reg) & ~mask) | (val & mask));
+}
+
+/*
+ * Helpers
+ */
+
+static void bcm63138_leds_set_flash_rate(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_FLASH_RATE_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_set_bright(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_BRIGHT_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_enable_led(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ enum led_brightness value)
+{
+ u32 bit = BIT(led->pin);
+
+ bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, bit, value ? bit : 0);
+}
+
+/*
+ * API callbacks
+ */
+
+static void bcm63138_leds_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, value);
+ if (!value)
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ else
+ bcm63138_leds_set_bright(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+}
+
+static int bcm63138_leds_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+ u8 value;
+
+ if (!*delay_on && !*delay_off) {
+ *delay_on = 640;
+ *delay_off = 640;
+ }
+
+ if (*delay_on != *delay_off) {
+ dev_dbg(led_cdev->dev, "Blinking at unequal delays is not supported\n");
+ return -EINVAL;
+ }
+
+ switch (*delay_on) {
+ case 1152 ... 1408: /* 1280 ms ± 10% */
+ value = 0x7;
+ break;
+ case 576 ... 704: /* 640 ms ± 10% */
+ value = 0x6;
+ break;
+ case 288 ... 352: /* 320 ms ± 10% */
+ value = 0x5;
+ break;
+ case 126 ... 154: /* 140 ms ± 10% */
+ value = 0x4;
+ break;
+ case 59 ... 72: /* 65 ms ± 10% */
+ value = 0x3;
+ break;
+ default:
+ dev_dbg(led_cdev->dev, "Blinking delay value %lu is unsupported\n",
+ *delay_on);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, BCM63138_MAX_BRIGHTNESS);
+ bcm63138_leds_set_flash_rate(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+
+ return 0;
+}
+
+/*
+ * LED driver
+ */
+
+static void bcm63138_leds_create_led(struct bcm63138_leds *leds,
+ struct device_node *np)
+{
+ struct led_init_data init_data = {
+ .fwnode = of_fwnode_handle(np),
+ };
+ struct device *dev = leds->dev;
+ struct bcm63138_led *led;
+ struct pinctrl *pinctrl;
+ u32 bit;
+ int err;
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "Failed to alloc LED\n");
+ return;
+ }
+
+ led->leds = leds;
+
+ if (of_property_read_u32(np, "reg", &led->pin)) {
+ dev_err(dev, "Missing \"reg\" property in %pOF\n", np);
+ goto err_free;
+ }
+
+ if (led->pin >= BCM63138_MAX_LEDS) {
+ dev_err(dev, "Invalid \"reg\" value %d\n", led->pin);
+ goto err_free;
+ }
+
+ led->active_low = of_property_read_bool(np, "active-low");
+
+ led->cdev.max_brightness = BCM63138_MAX_BRIGHTNESS;
+ led->cdev.brightness_set = bcm63138_leds_brightness_set;
+ led->cdev.blink_set = bcm63138_leds_blink_set;
+
+ err = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
+ if (err) {
+ dev_err(dev, "Failed to register LED %pOF: %d\n", np, err);
+ goto err_free;
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(led->cdev.dev);
+ if (IS_ERR(pinctrl) && PTR_ERR(pinctrl) != -ENODEV) {
+ dev_warn(led->cdev.dev, "Failed to select %pOF pinctrl: %ld\n",
+ np, PTR_ERR(pinctrl));
+ }
+
+ bit = BIT(led->pin);
+ bcm63138_leds_update_bits(leds, BCM63138_PARALLEL_LED_POLARITY, bit,
+ led->active_low ? 0 : bit);
+ bcm63138_leds_update_bits(leds, BCM63138_HW_LED_EN, bit, 0);
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ bcm63138_leds_enable_led(leds, led, led->cdev.brightness);
+
+ return;
+
+err_free:
+ devm_kfree(dev, led);
+}
+
+static int bcm63138_leds_probe(struct platform_device *pdev)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bcm63138_leds *leds;
+ struct device_node *child;
+
+ leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ leds->dev = dev;
+
+ leds->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(leds->base))
+ return PTR_ERR(leds->base);
+
+ spin_lock_init(&leds->lock);
+
+ bcm63138_leds_write(leds, BCM63138_GLB_CTRL,
+ BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL |
+ BCM63138_GLB_CTRL_SERIAL_LED_EN_POL);
+ bcm63138_leds_write(leds, BCM63138_HW_LED_EN, 0);
+ bcm63138_leds_write(leds, BCM63138_SERIAL_LED_POLARITY, 0);
+ bcm63138_leds_write(leds, BCM63138_PARALLEL_LED_POLARITY, 0);
+
+ for_each_available_child_of_node(np, child) {
+ bcm63138_leds_create_led(leds, child);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm63138_leds_of_match_table[] = {
+ { .compatible = "brcm,bcm63138-leds", },
+ { },
+};
+
+static struct platform_driver bcm63138_leds_driver = {
+ .probe = bcm63138_leds_probe,
+ .driver = {
+ .name = "leds-bcm63xxx",
+ .of_match_table = bcm63138_leds_of_match_table,
+ },
+};
+
+module_platform_driver(bcm63138_leds_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, bcm63138_leds_of_match_table);
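
The flash-rate and brightness helpers in leds-bcm63138.c above share one addressing scheme: 4 bits per LED, 8 LEDs per 32-bit register, so the driver's pin >> fls(LEDS_PER_REG - 1) is just pin / 8. A standalone sketch of that register math:

#include <stdio.h>

#define LED_BITS	4			/* bits per LED */
#define LEDS_PER_REG	(32 / LED_BITS)		/* 8 LEDs per register */
#define LED_MASK	((1u << LED_BITS) - 1)	/* 0xf */

int main(void)
{
	unsigned int pin;

	for (pin = 0; pin < 10; pin++) {
		/* Same as the driver's (pin >> fls(LEDS_PER_REG - 1)) * 4. */
		int reg_offset = (pin / LEDS_PER_REG) * 4;	/* byte offset */
		int shift = (pin % LEDS_PER_REG) * LED_BITS;

		printf("pin %2u -> reg +0x%02x, shift %2d, mask 0x%08x\n",
		       pin, reg_offset, shift, LED_MASK << shift);
	}
	return 0;
}
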
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 4161b9dd7e48..52b59b62f437 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -11,9 +11,9 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -21,39 +21,64 @@
/* register numbers */
#define IS31FL319X_SHUTDOWN 0x00
-#define IS31FL319X_CTRL1 0x01
-#define IS31FL319X_CTRL2 0x02
-#define IS31FL319X_CONFIG1 0x03
-#define IS31FL319X_CONFIG2 0x04
-#define IS31FL319X_RAMP_MODE 0x05
-#define IS31FL319X_BREATH_MASK 0x06
-#define IS31FL319X_PWM(channel) (0x07 + channel)
-#define IS31FL319X_DATA_UPDATE 0x10
-#define IS31FL319X_T0(channel) (0x11 + channel)
-#define IS31FL319X_T123_1 0x1a
-#define IS31FL319X_T123_2 0x1b
-#define IS31FL319X_T123_3 0x1c
-#define IS31FL319X_T4(channel) (0x1d + channel)
-#define IS31FL319X_TIME_UPDATE 0x26
-#define IS31FL319X_RESET 0xff
-
-#define IS31FL319X_REG_CNT (IS31FL319X_RESET + 1)
+
+/* registers for 3190, 3191 and 3193 */
+#define IS31FL3190_BREATHING 0x01
+#define IS31FL3190_LEDMODE 0x02
+#define IS31FL3190_CURRENT 0x03
+#define IS31FL3190_PWM(channel) (0x04 + channel)
+#define IS31FL3190_DATA_UPDATE 0x07
+#define IS31FL3190_T0(channel) (0x0a + channel)
+#define IS31FL3190_T1T2(channel) (0x10 + channel)
+#define IS31FL3190_T3T4(channel) (0x16 + channel)
+#define IS31FL3190_TIME_UPDATE 0x1c
+#define IS31FL3190_LEDCONTROL 0x1d
+#define IS31FL3190_RESET 0x2f
+
+#define IS31FL3190_CURRENT_uA_MIN 5000
+#define IS31FL3190_CURRENT_uA_DEFAULT 42000
+#define IS31FL3190_CURRENT_uA_MAX 42000
+#define IS31FL3190_CURRENT_MASK GENMASK(4, 2)
+#define IS31FL3190_CURRENT_5_mA 0x02
+#define IS31FL3190_CURRENT_10_mA 0x01
+#define IS31FL3190_CURRENT_17dot5_mA 0x04
+#define IS31FL3190_CURRENT_30_mA 0x03
+#define IS31FL3190_CURRENT_42_mA 0x00
+
+/* registers for 3196 and 3199 */
+#define IS31FL3196_CTRL1 0x01
+#define IS31FL3196_CTRL2 0x02
+#define IS31FL3196_CONFIG1 0x03
+#define IS31FL3196_CONFIG2 0x04
+#define IS31FL3196_RAMP_MODE 0x05
+#define IS31FL3196_BREATH_MARK 0x06
+#define IS31FL3196_PWM(channel) (0x07 + channel)
+#define IS31FL3196_DATA_UPDATE 0x10
+#define IS31FL3196_T0(channel) (0x11 + channel)
+#define IS31FL3196_T123_1 0x1a
+#define IS31FL3196_T123_2 0x1b
+#define IS31FL3196_T123_3 0x1c
+#define IS31FL3196_T4(channel) (0x1d + channel)
+#define IS31FL3196_TIME_UPDATE 0x26
+#define IS31FL3196_RESET 0xff
+
+#define IS31FL3196_REG_CNT (IS31FL3196_RESET + 1)
#define IS31FL319X_MAX_LEDS 9
/* CS (Current Setting) in CONFIG2 register */
-#define IS31FL319X_CONFIG2_CS_SHIFT 4
-#define IS31FL319X_CONFIG2_CS_MASK 0x7
-#define IS31FL319X_CONFIG2_CS_STEP_REF 12
+#define IS31FL3196_CONFIG2_CS_SHIFT 4
+#define IS31FL3196_CONFIG2_CS_MASK GENMASK(2, 0)
+#define IS31FL3196_CONFIG2_CS_STEP_REF 12
-#define IS31FL319X_CURRENT_MIN ((u32)5000)
-#define IS31FL319X_CURRENT_MAX ((u32)40000)
-#define IS31FL319X_CURRENT_STEP ((u32)5000)
-#define IS31FL319X_CURRENT_DEFAULT ((u32)20000)
+#define IS31FL3196_CURRENT_uA_MIN 5000
+#define IS31FL3196_CURRENT_uA_MAX 40000
+#define IS31FL3196_CURRENT_uA_STEP 5000
+#define IS31FL3196_CURRENT_uA_DEFAULT 20000
/* Audio gain in CONFIG2 register */
-#define IS31FL319X_AUDIO_GAIN_DB_MAX ((u32)21)
-#define IS31FL319X_AUDIO_GAIN_DB_STEP ((u32)3)
+#define IS31FL3196_AUDIO_GAIN_DB_MAX ((u32)21)
+#define IS31FL3196_AUDIO_GAIN_DB_STEP 3
/*
* regmap is used as a cache of chip's register space,
@@ -78,52 +103,161 @@ struct is31fl319x_chip {
struct is31fl319x_chipdef {
int num_leds;
+ u8 reset_reg;
+ const struct regmap_config *is31fl319x_regmap_config;
+ int (*brightness_set)(struct led_classdev *cdev, enum led_brightness brightness);
+ u32 current_default;
+ u32 current_min;
+ u32 current_max;
+ bool is_3196or3199;
};
-static const struct is31fl319x_chipdef is31fl3190_cdef = {
- .num_leds = 1,
-};
+static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* we have no readable registers */
+ return false;
+}
-static const struct is31fl319x_chipdef is31fl3193_cdef = {
- .num_leds = 3,
+static bool is31fl3190_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3190_DATA_UPDATE:
+ case IS31FL3190_TIME_UPDATE:
+ case IS31FL3190_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3190_reg_defaults[] = {
+ { IS31FL3190_LEDMODE, 0x00 },
+ { IS31FL3190_CURRENT, 0x00 },
+ { IS31FL3190_PWM(0), 0x00 },
+ { IS31FL3190_PWM(1), 0x00 },
+ { IS31FL3190_PWM(2), 0x00 },
};
-static const struct is31fl319x_chipdef is31fl3196_cdef = {
- .num_leds = 6,
+static struct regmap_config is31fl3190_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3190_RESET,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3190_volatile_reg,
+ .reg_defaults = is31fl3190_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3190_reg_defaults),
};
-static const struct is31fl319x_chipdef is31fl3199_cdef = {
- .num_leds = 9,
+static bool is31fl3196_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3196_DATA_UPDATE:
+ case IS31FL3196_TIME_UPDATE:
+ case IS31FL3196_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3196_reg_defaults[] = {
+ { IS31FL3196_CONFIG1, 0x00 },
+ { IS31FL3196_CONFIG2, 0x00 },
+ { IS31FL3196_PWM(0), 0x00 },
+ { IS31FL3196_PWM(1), 0x00 },
+ { IS31FL3196_PWM(2), 0x00 },
+ { IS31FL3196_PWM(3), 0x00 },
+ { IS31FL3196_PWM(4), 0x00 },
+ { IS31FL3196_PWM(5), 0x00 },
+ { IS31FL3196_PWM(6), 0x00 },
+ { IS31FL3196_PWM(7), 0x00 },
+ { IS31FL3196_PWM(8), 0x00 },
};
-static const struct of_device_id of_is31fl319x_match[] = {
- { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
- { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
- { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
- { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
- { }
+static struct regmap_config is31fl3196_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3196_REG_CNT,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3196_volatile_reg,
+ .reg_defaults = is31fl3196_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3196_reg_defaults),
};
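
Both regmap configs above lean on the same trick for a write-only part: readable_reg() always returns false, while REGCACHE_FLAT shadows every write. regmap_read() consults the cache before it ever checks readability, so the driver can "read back" PWM values over a bus that cannot read at all. A minimal sketch of that behaviour (register address and value are hypothetical):

#include <linux/regmap.h>

/* 'map' is assumed to come from devm_regmap_init_i2c() with one of the
 * write-only configs above; 0x07 stands in for any cached register. */
static int demo_cached_readback(struct regmap *map)
{
	unsigned int val;
	int ret;

	ret = regmap_write(map, 0x07, 128);	/* reaches the bus and the cache */
	if (ret)
		return ret;

	/* served from REGCACHE_FLAT, no I2C traffic; val is now 128 */
	return regmap_read(map, 0x07, &val);
}
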
-MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
-static int is31fl319x_brightness_set(struct led_classdev *cdev,
+static int is31fl3190_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
+ struct is31fl319x_chip *is31 = led->chip;
+ int chan = led - is31->leds;
+ int ret;
+ int i;
+ u8 ctrl = 0;
+
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
+
+ mutex_lock(&is31->lock);
+
+ /* update PWM register */
+ ret = regmap_write(is31->regmap, IS31FL3190_PWM(chan), brightness);
+ if (ret < 0)
+ goto out;
+
+ /* read current brightness of all PWM channels */
+ for (i = 0; i < is31->cdef->num_leds; i++) {
+ unsigned int pwm_value;
+ bool on;
+
+ /*
+ * since neither cdev nor the chip can provide
+ * the current setting, we read from the regmap cache
+ */
+
+ ret = regmap_read(is31->regmap, IS31FL3190_PWM(i), &pwm_value);
+ on = ret >= 0 && pwm_value > LED_OFF;
+
+ ctrl |= on << i;
+ }
+
+ if (ctrl > 0) {
+ dev_dbg(&is31->client->dev, "power up %02x\n", ctrl);
+ regmap_write(is31->regmap, IS31FL3190_LEDCONTROL, ctrl);
+ /* update PWMs */
+ regmap_write(is31->regmap, IS31FL3190_DATA_UPDATE, 0x00);
+ /* enable chip from shut down and enable all channels */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x20);
+ } else {
+ dev_dbg(&is31->client->dev, "power down\n");
+ /* shut down (no need to clear LEDCONTROL) */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
+ }
+
+out:
+ mutex_unlock(&is31->lock);
+
+ return ret;
+}
+
+static int is31fl3196_brightness_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
- struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led,
- cdev);
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
struct is31fl319x_chip *is31 = led->chip;
int chan = led - is31->leds;
int ret;
int i;
u8 ctrl1 = 0, ctrl2 = 0;
- dev_dbg(&is31->client->dev, "%s %d: %d\n", __func__, chan, brightness);
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
mutex_lock(&is31->lock);
/* update PWM register */
- ret = regmap_write(is31->regmap, IS31FL319X_PWM(chan), brightness);
+ ret = regmap_write(is31->regmap, IS31FL3196_PWM(chan), brightness);
if (ret < 0)
goto out;
@@ -137,9 +271,7 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
* the current setting, we read from the regmap cache
*/
- ret = regmap_read(is31->regmap, IS31FL319X_PWM(i), &pwm_value);
- dev_dbg(&is31->client->dev, "%s read %d: ret=%d: %d\n",
- __func__, i, ret, pwm_value);
+ ret = regmap_read(is31->regmap, IS31FL3196_PWM(i), &pwm_value);
on = ret >= 0 && pwm_value > LED_OFF;
if (i < 3)
@@ -153,10 +285,10 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
if (ctrl1 > 0 || ctrl2 > 0) {
dev_dbg(&is31->client->dev, "power up %02x %02x\n",
ctrl1, ctrl2);
- regmap_write(is31->regmap, IS31FL319X_CTRL1, ctrl1);
- regmap_write(is31->regmap, IS31FL319X_CTRL2, ctrl2);
+ regmap_write(is31->regmap, IS31FL3196_CTRL1, ctrl1);
+ regmap_write(is31->regmap, IS31FL3196_CTRL2, ctrl2);
/* update PWMs */
- regmap_write(is31->regmap, IS31FL319X_DATA_UPDATE, 0x00);
+ regmap_write(is31->regmap, IS31FL3196_DATA_UPDATE, 0x00);
/* enable chip from shut down */
ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
} else {
@@ -171,92 +303,141 @@ out:
return ret;
}
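
Both brightness handlers recover their per-channel context the same way: container_of() walks from the embedded classdev back to the wrapping LED struct, and pointer subtraction against the chip's LED array yields the channel number. A reduced sketch with hypothetical struct names:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/leds.h>

struct demo_led {
	struct led_classdev cdev;
	struct demo_chip *chip;
};

struct demo_chip {
	struct demo_led leds[9];
};

static int demo_brightness_set(struct led_classdev *cdev,
			       enum led_brightness brightness)
{
	/* from the embedded member back to its wrapper... */
	struct demo_led *led = container_of(cdev, struct demo_led, cdev);
	/* ...then array-element subtraction gives the channel index */
	int chan = led - led->chip->leds;

	return chan >= 0 && brightness <= LED_FULL ? 0 : -EINVAL;
}
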
-static int is31fl319x_parse_child_dt(const struct device *dev,
- const struct device_node *child,
- struct is31fl319x_led *led)
+static const struct is31fl319x_chipdef is31fl3190_cdef = {
+ .num_leds = 1,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3193_cdef = {
+ .num_leds = 3,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3196_cdef = {
+ .num_leds = 6,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct is31fl319x_chipdef is31fl3199_cdef = {
+ .num_leds = 9,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct of_device_id of_is31fl319x_match[] = {
+ { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
+ { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
+ { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
+ { .compatible = "si-en,sn3190", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3191", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3193", .data = &is31fl3193_cdef, },
+ { .compatible = "si-en,sn3196", .data = &is31fl3196_cdef, },
+ { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
+
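
With both the issi,* and si-en,* compatibles pointing at shared chipdef structures, probe only needs device_get_match_data(), which hands back the .data member of whichever OF (or ACPI) entry matched. A hedged sketch reusing the chipdef type from above; the NULL check is an addition, not something the driver itself does:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>

static int demo_pick_chipdef(struct device *dev,
			     const struct is31fl319x_chipdef **cdef)
{
	*cdef = device_get_match_data(dev);
	if (!*cdef)	/* bound without match data, e.g. bare i2c id */
		return -ENODEV;
	return 0;
}
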
+static int is31fl319x_parse_child_fw(const struct device *dev,
+ const struct fwnode_handle *child,
+ struct is31fl319x_led *led,
+ struct is31fl319x_chip *is31)
{
struct led_classdev *cdev = &led->cdev;
int ret;
- if (of_property_read_string(child, "label", &cdev->name))
- cdev->name = child->name;
+ if (fwnode_property_read_string(child, "label", &cdev->name))
+ cdev->name = fwnode_get_name(child);
- ret = of_property_read_string(child, "linux,default-trigger",
- &cdev->default_trigger);
+ ret = fwnode_property_read_string(child, "linux,default-trigger", &cdev->default_trigger);
if (ret < 0 && ret != -EINVAL) /* is optional */
return ret;
- led->max_microamp = IS31FL319X_CURRENT_DEFAULT;
- ret = of_property_read_u32(child, "led-max-microamp",
- &led->max_microamp);
+ led->max_microamp = is31->cdef->current_default;
+ ret = fwnode_property_read_u32(child, "led-max-microamp", &led->max_microamp);
if (!ret) {
- if (led->max_microamp < IS31FL319X_CURRENT_MIN)
+ if (led->max_microamp < is31->cdef->current_min)
return -EINVAL; /* not supported */
led->max_microamp = min(led->max_microamp,
- IS31FL319X_CURRENT_MAX);
+ is31->cdef->current_max);
}
return 0;
}
-static int is31fl319x_parse_dt(struct device *dev,
- struct is31fl319x_chip *is31)
+static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
{
- struct device_node *np = dev_of_node(dev), *child;
+ struct fwnode_handle *fwnode = dev_fwnode(dev), *child;
int count;
int ret;
- if (!np)
- return -ENODEV;
-
- is31->shutdown_gpio = devm_gpiod_get_optional(dev,
- "shutdown",
- GPIOD_OUT_HIGH);
- if (IS_ERR(is31->shutdown_gpio)) {
- ret = PTR_ERR(is31->shutdown_gpio);
- dev_err(dev, "Failed to get shutdown gpio: %d\n", ret);
- return ret;
- }
+ is31->shutdown_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(is31->shutdown_gpio))
+ return dev_err_probe(dev, PTR_ERR(is31->shutdown_gpio),
+ "Failed to get shutdown gpio\n");
is31->cdef = device_get_match_data(dev);
- count = of_get_available_child_count(np);
+ count = 0;
+ fwnode_for_each_available_child_node(fwnode, child)
+ count++;
dev_dbg(dev, "probing with %d leds defined in DT\n", count);
- if (!count || count > is31->cdef->num_leds) {
- dev_err(dev, "Number of leds defined must be between 1 and %u\n",
- is31->cdef->num_leds);
- return -ENODEV;
- }
+ if (!count || count > is31->cdef->num_leds)
+ return dev_err_probe(dev, -ENODEV,
+ "Number of leds defined must be between 1 and %u\n",
+ is31->cdef->num_leds);
- for_each_available_child_of_node(np, child) {
+ fwnode_for_each_available_child_node(fwnode, child) {
struct is31fl319x_led *led;
u32 reg;
- ret = of_property_read_u32(child, "reg", &reg);
+ ret = fwnode_property_read_u32(child, "reg", &reg);
if (ret) {
- dev_err(dev, "Failed to read led 'reg' property\n");
+ ret = dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
goto put_child_node;
}
if (reg < 1 || reg > is31->cdef->num_leds) {
- dev_err(dev, "invalid led reg %u\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
goto put_child_node;
}
led = &is31->leds[reg - 1];
if (led->configured) {
- dev_err(dev, "led %u is already configured\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
goto put_child_node;
}
- ret = is31fl319x_parse_child_dt(dev, child, led);
+ ret = is31fl319x_parse_child_fw(dev, child, led, is31);
if (ret) {
- dev_err(dev, "led %u DT parsing failed\n", reg);
+ ret = dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
goto put_child_node;
}
@@ -264,82 +445,62 @@ static int is31fl319x_parse_dt(struct device *dev,
}
is31->audio_gain_db = 0;
- ret = of_property_read_u32(np, "audio-gain-db", &is31->audio_gain_db);
- if (!ret)
- is31->audio_gain_db = min(is31->audio_gain_db,
- IS31FL319X_AUDIO_GAIN_DB_MAX);
+ if (is31->cdef->is_3196or3199) {
+ ret = fwnode_property_read_u32(fwnode, "audio-gain-db", &is31->audio_gain_db);
+ if (!ret)
+ is31->audio_gain_db = min(is31->audio_gain_db,
+ IS31FL3196_AUDIO_GAIN_DB_MAX);
+ }
return 0;
put_child_node:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
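
The open-coded counting loop in is31fl319x_parse_fw() is equivalent to the device_get_child_node_count() helper from <linux/property.h>, which likewise visits only available children. A sketch of the shorter form:

#include <linux/errno.h>
#include <linux/property.h>

static int demo_count_leds(struct device *dev, unsigned int max_leds)
{
	unsigned int count = device_get_child_node_count(dev);

	if (!count || count > max_leds)
		return -ENODEV;	/* same bounds check as above */
	return count;
}
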
-static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
-{ /* we have no readable registers */
- return false;
-}
-
-static bool is31fl319x_volatile_reg(struct device *dev, unsigned int reg)
-{ /* volatile registers are not cached */
- switch (reg) {
- case IS31FL319X_DATA_UPDATE:
- case IS31FL319X_TIME_UPDATE:
- case IS31FL319X_RESET:
- return true; /* always write-through */
+static inline int is31fl3190_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ switch (microamp) {
+ case 5000:
+ return IS31FL3190_CURRENT_5_mA;
+ case 10000:
+ return IS31FL3190_CURRENT_10_mA;
+ case 17500:
+ return IS31FL3190_CURRENT_17dot5_mA;
+ case 30000:
+ return IS31FL3190_CURRENT_30_mA;
+ case 42000:
+ return IS31FL3190_CURRENT_42_mA;
default:
- return false;
+ dev_warn(dev, "Unsupported current value: %d, using 5000 µA!\n", microamp);
+ return IS31FL3190_CURRENT_5_mA;
}
}
-static const struct reg_default is31fl319x_reg_defaults[] = {
- { IS31FL319X_CONFIG1, 0x00},
- { IS31FL319X_CONFIG2, 0x00},
- { IS31FL319X_PWM(0), 0x00},
- { IS31FL319X_PWM(1), 0x00},
- { IS31FL319X_PWM(2), 0x00},
- { IS31FL319X_PWM(3), 0x00},
- { IS31FL319X_PWM(4), 0x00},
- { IS31FL319X_PWM(5), 0x00},
- { IS31FL319X_PWM(6), 0x00},
- { IS31FL319X_PWM(7), 0x00},
- { IS31FL319X_PWM(8), 0x00},
-};
-
-static struct regmap_config regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = IS31FL319X_REG_CNT,
- .cache_type = REGCACHE_FLAT,
- .readable_reg = is31fl319x_readable_reg,
- .volatile_reg = is31fl319x_volatile_reg,
- .reg_defaults = is31fl319x_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(is31fl319x_reg_defaults),
-};
-
-static inline int is31fl319x_microamp_to_cs(struct device *dev, u32 microamp)
-{ /* round down to nearest supported value (range check done by caller) */
- u32 step = microamp / IS31FL319X_CURRENT_STEP;
+static inline int is31fl3196_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ u32 step = microamp / IS31FL3196_CURRENT_uA_STEP;
- return ((IS31FL319X_CONFIG2_CS_STEP_REF - step) &
- IS31FL319X_CONFIG2_CS_MASK) <<
- IS31FL319X_CONFIG2_CS_SHIFT; /* CS encoding */
+ return ((IS31FL3196_CONFIG2_CS_STEP_REF - step) &
+ IS31FL3196_CONFIG2_CS_MASK) <<
+ IS31FL3196_CONFIG2_CS_SHIFT; /* CS encoding */
}
-static inline int is31fl319x_db_to_gain(u32 dezibel)
-{ /* round down to nearest supported value (range check done by caller) */
- return dezibel / IS31FL319X_AUDIO_GAIN_DB_STEP;
+static inline int is31fl3196_db_to_gain(u32 dezibel)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
}
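
The CS encoding in is31fl3196_microamp_to_cs() is terse enough to deserve a worked example: 20000 µA gives step 4, (12 - 4) & 0x7 = 0, so the CS field keeps its 20 mA default of 0. A standalone userspace re-derivation over the whole supported range:

#include <stdio.h>

#define CS_SHIFT	4
#define CS_MASK		0x7	/* GENMASK(2, 0) */
#define CS_STEP_REF	12
#define CURRENT_uA_STEP	5000

/* mirror of is31fl3196_microamp_to_cs() */
static unsigned int microamp_to_cs(unsigned int microamp)
{
	unsigned int step = microamp / CURRENT_uA_STEP;

	return ((CS_STEP_REF - step) & CS_MASK) << CS_SHIFT;
}

int main(void)
{
	unsigned int ua;

	/* prints 0x30, 0x20, 0x10, 0x00, 0x70, 0x60, 0x50, 0x40 */
	for (ua = 5000; ua <= 40000; ua += 5000)
		printf("%5u uA -> CONFIG2 CS bits 0x%02x\n", ua, microamp_to_cs(ua));
	return 0;
}
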
-static int is31fl319x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int is31fl319x_probe(struct i2c_client *client)
{
struct is31fl319x_chip *is31;
struct device *dev = &client->dev;
int err;
int i = 0;
- u32 aggregated_led_microamp = IS31FL319X_CURRENT_MAX;
+ u32 aggregated_led_microamp;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -EIO;
@@ -349,10 +510,13 @@ static int is31fl319x_probe(struct i2c_client *client,
return -ENOMEM;
mutex_init(&is31->lock);
+ err = devm_add_action(dev, (void (*)(void *))mutex_destroy, &is31->lock);
+ if (err)
+ return err;
- err = is31fl319x_parse_dt(&client->dev, is31);
+ err = is31fl319x_parse_fw(&client->dev, is31);
if (err)
- goto free_mutex;
+ return err;
if (is31->shutdown_gpio) {
gpiod_direction_output(is31->shutdown_gpio, 0);
@@ -361,37 +525,35 @@ static int is31fl319x_probe(struct i2c_client *client,
}
is31->client = client;
- is31->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(is31->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- err = PTR_ERR(is31->regmap);
- goto free_mutex;
- }
+ is31->regmap = devm_regmap_init_i2c(client, is31->cdef->is31fl319x_regmap_config);
+ if (IS_ERR(is31->regmap))
+ return dev_err_probe(dev, PTR_ERR(is31->regmap), "failed to allocate register map\n");
i2c_set_clientdata(client, is31);
/* check for write-reply from chip (we can't read any registers) */
- err = regmap_write(is31->regmap, IS31FL319X_RESET, 0x00);
- if (err < 0) {
- dev_err(&client->dev, "no response from chip write: err = %d\n",
- err);
- err = -EIO; /* does not answer */
- goto free_mutex;
- }
+ err = regmap_write(is31->regmap, is31->cdef->reset_reg, 0x00);
+ if (err < 0)
+ return dev_err_probe(dev, err, "no response from chip write\n");
/*
* Kernel conventions require per-LED led-max-microamp property.
* But the chip does not allow to limit individual LEDs.
* So we take minimum from all subnodes for safety of hardware.
*/
+ aggregated_led_microamp = is31->cdef->current_max;
for (i = 0; i < is31->cdef->num_leds; i++)
if (is31->leds[i].configured &&
is31->leds[i].max_microamp < aggregated_led_microamp)
aggregated_led_microamp = is31->leds[i].max_microamp;
- regmap_write(is31->regmap, IS31FL319X_CONFIG2,
- is31fl319x_microamp_to_cs(dev, aggregated_led_microamp) |
- is31fl319x_db_to_gain(is31->audio_gain_db));
+ if (is31->cdef->is_3196or3199)
+ regmap_write(is31->regmap, IS31FL3196_CONFIG2,
+ is31fl3196_microamp_to_cs(dev, aggregated_led_microamp) |
+ is31fl3196_db_to_gain(is31->audio_gain_db));
+ else
+ regmap_update_bits(is31->regmap, IS31FL3190_CURRENT, IS31FL3190_CURRENT_MASK,
+ is31fl3190_microamp_to_cs(dev, aggregated_led_microamp));
for (i = 0; i < is31->cdef->num_leds; i++) {
struct is31fl319x_led *led = &is31->leds[i];
@@ -400,26 +562,14 @@ static int is31fl319x_probe(struct i2c_client *client,
continue;
led->chip = is31;
- led->cdev.brightness_set_blocking = is31fl319x_brightness_set;
+ led->cdev.brightness_set_blocking = is31->cdef->brightness_set;
err = devm_led_classdev_register(&client->dev, &led->cdev);
if (err < 0)
- goto free_mutex;
+ return err;
}
return 0;
-
-free_mutex:
- mutex_destroy(&is31->lock);
- return err;
-}
-
-static int is31fl319x_remove(struct i2c_client *client)
-{
- struct is31fl319x_chip *is31 = i2c_get_clientdata(client);
-
- mutex_destroy(&is31->lock);
- return 0;
}
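
Registering mutex_destroy() as a devres action is what lets the patch delete the free_mutex label and the whole remove() callback: every error return and the eventual unbind now run the action automatically. A sketch of the same pattern without the function-pointer cast used above (the cast works but defeats type checking):

#include <linux/device.h>
#include <linux/mutex.h>

static void demo_mutex_destroy(void *lock)
{
	mutex_destroy(lock);
}

static int demo_init_lock(struct device *dev, struct mutex *lock)
{
	mutex_init(lock);
	/* the _or_reset variant also runs the action if registration fails */
	return devm_add_action_or_reset(dev, demo_mutex_destroy, lock);
}
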
/*
@@ -432,6 +582,10 @@ static const struct i2c_device_id is31fl319x_id[] = {
{ "is31fl3193" },
{ "is31fl3196" },
{ "is31fl3199" },
+ { "sn3190" },
+ { "sn3191" },
+ { "sn3193" },
+ { "sn3196" },
{ "sn3199" },
{},
};
@@ -440,10 +594,9 @@ MODULE_DEVICE_TABLE(i2c, is31fl319x_id);
static struct i2c_driver is31fl319x_driver = {
.driver = {
.name = "leds-is31fl319x",
- .of_match_table = of_match_ptr(of_is31fl319x_match),
+ .of_match_table = of_is31fl319x_match,
},
- .probe = is31fl319x_probe,
- .remove = is31fl319x_remove,
+ .probe_new = is31fl319x_probe,
.id_table = is31fl319x_id,
};
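
probe_new drops the struct i2c_device_id * argument that most drivers never used; the id table remains solely for module autoloading. The minimal shape, for reference (names hypothetical):

#include <linux/i2c.h>

static int demo_probe(struct i2c_client *client)
{
	return 0;	/* device setup would go here */
}

static struct i2c_driver demo_driver = {
	.driver	= { .name = "demo" },
	.probe_new = demo_probe,
};
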
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 1adfed1c0619..eac6f4a573b2 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -239,9 +239,6 @@ static int omnia_leds_probe(struct i2c_client *client,
led += ret;
}
- if (devm_device_add_groups(dev, omnia_led_controller_groups))
- dev_warn(dev, "Could not add attribute group!\n");
-
return 0;
}
@@ -283,6 +280,7 @@ static struct i2c_driver omnia_leds_driver = {
.driver = {
.name = "leds-turris-omnia",
.of_match_table = of_omnia_leds_match,
+ .dev_groups = omnia_led_controller_groups,
},
};
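
Declaring the groups via .dev_groups makes the driver core create and remove the sysfs files around probe/unbind, which is why the fallible devm_device_add_groups() call and its warning could go. A hedged sketch of the mechanism with a hypothetical read-only attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "42\n");
}
static DEVICE_ATTR(demo, 0444, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);	/* generates demo_groups for use in .dev_groups */
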
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 45e38708ecb1..da9d2218ae18 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -19,6 +19,7 @@
struct pwm_led {
struct pwm_device *pwm;
struct pwm_state state;
+ bool active_low;
};
struct pwm_mc_led {
@@ -45,6 +46,9 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
duty *= mc_cdev->subled_info[i].brightness;
do_div(duty, cdev->max_brightness);
+ if (priv->leds[i].active_low)
+ duty = priv->leds[i].state.period - duty;
+
priv->leds[i].state.duty_cycle = duty;
priv->leds[i].state.enabled = duty > 0;
ret = pwm_apply_state(priv->leds[i].pwm,
@@ -72,11 +76,11 @@ static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv,
pwmled = &priv->leds[priv->mc_cdev.num_colors];
pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
if (IS_ERR(pwmled->pwm)) {
- ret = PTR_ERR(pwmled->pwm);
- dev_err(dev, "unable to request PWM: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(pwmled->pwm), "unable to request PWM\n");
goto release_fwnode;
}
pwm_init_state(pwmled->pwm, &pwmled->state);
+ pwmled->active_low = fwnode_property_read_bool(fwnode, "active-low");
ret = fwnode_property_read_u32(fwnode, "color", &color);
if (ret) {
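
The active_low handling inverts the duty cycle rather than the brightness: a low pin lights the LED, so its on-time becomes period - duty. A one-function restatement with hypothetical numbers in the comment:

#include <linux/types.h>

/* period = 10000 ns, requested duty = 2500 ns:
 * active-high -> pin high 2500 ns, LED lit 25% of the period
 * active-low  -> pin high 7500 ns, pin low (LED lit) 25% of the period
 */
static u64 demo_effective_duty(u64 period, u64 duty, bool active_low)
{
	return active_low ? period - duty : duty;
}
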
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
index 9f6a68336659..fd2b8225d926 100644
--- a/drivers/leds/simple/Kconfig
+++ b/drivers/leds/simple/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config LEDS_SIEMENS_SIMATIC_IPC
tristate "LED driver for Siemens Simatic IPCs"
- depends on LEDS_CLASS
+ depends on LEDS_GPIO
depends on SIEMENS_SIMATIC_IPC
help
This option enables support for the LEDs of several Industrial PCs
from Siemens.
- To compile this driver as a module, choose M here: the module
- will be called simatic-ipc-leds.
+ To compile this driver as a module, choose M here: the modules
+ will be called simatic-ipc-leds and simatic-ipc-leds-gpio.
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simple/Makefile
index 8481f1e9e360..1c7ef5e1324b 100644
--- a/drivers/leds/simple/Makefile
+++ b/drivers/leds/simple/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds-gpio.o
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
new file mode 100644
index 000000000000..4c9e663a90ba
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2022
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
+ },
+};
+
+static const struct gpio_led simatic_ipc_gpio_leds[] = {
+ { .name = "green:" LED_FUNCTION_STATUS "-3" },
+ { .name = "red:" LED_FUNCTION_STATUS "-1" },
+ { .name = "green:" LED_FUNCTION_STATUS "-1" },
+ { .name = "red:" LED_FUNCTION_STATUS "-2" },
+ { .name = "green:" LED_FUNCTION_STATUS "-2" },
+ { .name = "red:" LED_FUNCTION_STATUS "-3" },
+};
+
+static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
+ .num_leds = ARRAY_SIZE(simatic_ipc_gpio_leds),
+ .leds = simatic_ipc_gpio_leds,
+};
+
+static struct platform_device *simatic_leds_pdev;
+
+static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
+{
+ gpiod_remove_lookup_table(&simatic_ipc_led_gpio_table);
+ platform_device_unregister(simatic_leds_pdev);
+
+ return 0;
+}
+
+static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
+{
+ struct gpio_desc *gpiod;
+ int err;
+
+ gpiod_add_lookup_table(&simatic_ipc_led_gpio_table);
+ simatic_leds_pdev = platform_device_register_resndata(NULL,
+ "leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
+ &simatic_ipc_gpio_leds_pdata,
+ sizeof(simatic_ipc_gpio_leds_pdata));
+ if (IS_ERR(simatic_leds_pdev)) {
+ err = PTR_ERR(simatic_leds_pdev);
+ goto out;
+ }
+
+ /* PM_BIOS_BOOT_N */
+ gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 6, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ /* PM_WDT_OUT */
+ gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 7, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ return 0;
+out:
+ simatic_ipc_leds_gpio_remove(pdev);
+
+ return err;
+}
+
+static struct platform_driver simatic_ipc_led_gpio_driver = {
+ .probe = simatic_ipc_leds_gpio_probe,
+ .remove = simatic_ipc_leds_gpio_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ }
+};
+module_platform_driver(simatic_ipc_led_gpio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: platform:leds-gpio");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
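
Since this x86 platform has no firmware description of the LEDs, the new file wires everything up in code: the lookup table maps lines of the named pinctrl gpiochip onto indexed GPIOs of the "leds-gpio" consumer that the driver registers itself, polarity included. A reduced sketch of the mechanism (chip label and consumer name hypothetical):

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table demo_gpio_table = {
	.dev_id = "demo-consumer",	/* dev_name() of the consumer */
	.table = {
		/* line 4 of gpiochip "demo-pinctrl.0" becomes the
		 * consumer's GPIO index 0, active-low applied centrally */
		GPIO_LOOKUP_IDX("demo-pinctrl.0", 4, NULL, 0, GPIO_ACTIVE_LOW),
		{ }	/* sentinel */
	},
};

/* call gpiod_add_lookup_table(&demo_gpio_table) before the consumer
 * probes and gpiod_remove_lookup_table(&demo_gpio_table) on teardown */
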
diff --git a/drivers/leds/simple/simatic-ipc-leds.c b/drivers/leds/simple/simatic-ipc-leds.c
index 078d43f5ba38..4894c228c165 100644
--- a/drivers/leds/simple/simatic-ipc-leds.c
+++ b/drivers/leds/simple/simatic-ipc-leds.c
@@ -23,7 +23,7 @@
#define SIMATIC_IPC_LED_PORT_BASE 0x404E
struct simatic_ipc_led {
- unsigned int value; /* mask for io and offset for mem */
+ unsigned int value; /* mask for io */
char *name;
struct led_classdev cdev;
};
@@ -38,21 +38,6 @@ static struct simatic_ipc_led simatic_ipc_leds_io[] = {
{ }
};
-/* the actual start will be discovered with PCI, 0 is a placeholder */
-static struct resource simatic_ipc_led_mem_res = DEFINE_RES_MEM_NAMED(0, SZ_4K, KBUILD_MODNAME);
-
-static void __iomem *simatic_ipc_led_memory;
-
-static struct simatic_ipc_led simatic_ipc_leds_mem[] = {
- {0x500 + 0x1A0, "red:" LED_FUNCTION_STATUS "-1"},
- {0x500 + 0x1A8, "green:" LED_FUNCTION_STATUS "-1"},
- {0x500 + 0x1C8, "red:" LED_FUNCTION_STATUS "-2"},
- {0x500 + 0x1D0, "green:" LED_FUNCTION_STATUS "-2"},
- {0x500 + 0x1E0, "red:" LED_FUNCTION_STATUS "-3"},
- {0x500 + 0x198, "green:" LED_FUNCTION_STATUS "-3"},
- { }
-};
-
static struct resource simatic_ipc_led_io_res =
DEFINE_RES_IO_NAMED(SIMATIC_IPC_LED_PORT_BASE, SZ_2, KBUILD_MODNAME);
@@ -88,28 +73,6 @@ static enum led_brightness simatic_ipc_led_get_io(struct led_classdev *led_cd)
return inw(SIMATIC_IPC_LED_PORT_BASE) & led->value ? LED_OFF : led_cd->max_brightness;
}
-static void simatic_ipc_led_set_mem(struct led_classdev *led_cd,
- enum led_brightness brightness)
-{
- struct simatic_ipc_led *led = cdev_to_led(led_cd);
- void __iomem *reg = simatic_ipc_led_memory + led->value;
- u32 val;
-
- val = readl(reg);
- val = (val & ~1) | (brightness == LED_OFF);
- writel(val, reg);
-}
-
-static enum led_brightness simatic_ipc_led_get_mem(struct led_classdev *led_cd)
-{
- struct simatic_ipc_led *led = cdev_to_led(led_cd);
- void __iomem *reg = simatic_ipc_led_memory + led->value;
- u32 val;
-
- val = readl(reg);
- return (val & 1) ? LED_OFF : led_cd->max_brightness;
-}
-
static int simatic_ipc_leds_probe(struct platform_device *pdev)
{
const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
@@ -117,9 +80,7 @@ static int simatic_ipc_leds_probe(struct platform_device *pdev)
struct simatic_ipc_led *ipcled;
struct led_classdev *cdev;
struct resource *res;
- void __iomem *reg;
- int err, type;
- u32 val;
+ int err;
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227D:
@@ -134,52 +95,19 @@ static int simatic_ipc_leds_probe(struct platform_device *pdev)
}
ipcled = simatic_ipc_leds_io;
}
- type = IORESOURCE_IO;
if (!devm_request_region(dev, res->start, resource_size(res), KBUILD_MODNAME)) {
dev_err(dev, "Unable to register IO resource at %pR\n", res);
return -EBUSY;
}
break;
- case SIMATIC_IPC_DEVICE_127E:
- res = &simatic_ipc_led_mem_res;
- ipcled = simatic_ipc_leds_mem;
- type = IORESOURCE_MEM;
-
- /* get GPIO base from PCI */
- res->start = simatic_ipc_get_membase0(PCI_DEVFN(13, 0));
- if (res->start == 0)
- return -ENODEV;
-
- /* do the final address calculation */
- res->start = res->start + (0xC5 << 16);
- res->end += res->start;
-
- simatic_ipc_led_memory = devm_ioremap_resource(dev, res);
- if (IS_ERR(simatic_ipc_led_memory))
- return PTR_ERR(simatic_ipc_led_memory);
-
- /* initialize power/watchdog LED */
- reg = simatic_ipc_led_memory + 0x500 + 0x1D8; /* PM_WDT_OUT */
- val = readl(reg);
- writel(val & ~1, reg);
-
- reg = simatic_ipc_led_memory + 0x500 + 0x1C0; /* PM_BIOS_BOOT_N */
- val = readl(reg);
- writel(val | 1, reg);
- break;
default:
return -ENODEV;
}
while (ipcled->value) {
cdev = &ipcled->cdev;
- if (type == IORESOURCE_MEM) {
- cdev->brightness_set = simatic_ipc_led_set_mem;
- cdev->brightness_get = simatic_ipc_led_get_mem;
- } else {
- cdev->brightness_set = simatic_ipc_led_set_io;
- cdev->brightness_get = simatic_ipc_led_get_io;
- }
+ cdev->brightness_set = simatic_ipc_led_set_io;
+ cdev->brightness_get = simatic_ipc_led_get_io;
cdev->max_brightness = LED_ON;
cdev->name = ipcled->name;
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 439fab4eaa85..1bbb9ca08d40 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)
switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
- if (req->nbytes < 3)
+ if (req->nbytes < 3 || req->data[2] >= 16)
break;
mutex_lock(&adb_handler_mutex);
req->reply[0] = adb_handler[req->data[2]].original_address;
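
The adb change is a classic untrusted-index fix: req->data[2] arrives from userspace and indexes the 16-entry adb_handler[] table, so it has to be range-checked before the lookup. The pattern, reduced:

#include <linux/errno.h>

#define DEMO_TABLE_SIZE 16

static int demo_lookup(const int *table, unsigned int id)
{
	if (id >= DEMO_TABLE_SIZE)	/* reject before indexing */
		return -EINVAL;
	return table[id];
}
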
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index b10239d6ef93..02922073c9ef 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -19,13 +19,15 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#define IMX_MU_CHANS 16
+#define IMX_MU_CHANS 17
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
+#define IMX_MU_NUM_RR 4
+
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
@@ -35,9 +37,11 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RX = 1, /* Rx */
IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
+ IMX_MU_TYPE_RST = 4, /* Reset */
};
enum imx_mu_xcr {
+ IMX_MU_CR,
IMX_MU_GIER,
IMX_MU_GCR,
IMX_MU_TCR,
@@ -50,6 +54,7 @@ enum imx_mu_xsr {
IMX_MU_GSR,
IMX_MU_TSR,
IMX_MU_RSR,
+ IMX_MU_xSR_MAX,
};
struct imx_sc_rpc_msg_max {
@@ -85,7 +90,7 @@ struct imx_mu_priv {
int irq[IMX_MU_CHANS];
bool suspend;
- u32 xcr[4];
+ u32 xcr[IMX_MU_xCR_MAX];
bool side_b;
};
@@ -105,8 +110,8 @@ struct imx_mu_dcfg {
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
- u32 xSR[4]; /* Status Registers */
- u32 xCR[4]; /* Control Registers */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
};
#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
@@ -121,6 +126,9 @@ struct imx_mu_dcfg {
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+/* MU reset */
+#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
+#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -497,6 +505,8 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
(ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ return IRQ_NONE;
default:
dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
cp->type);
@@ -581,6 +591,8 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
+ int ret;
+ u32 sr;
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
@@ -598,6 +610,13 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
case IMX_MU_TYPE_RXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
+ !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
+ if (ret)
+ dev_warn(priv->dev, "RST channel timeout\n");
+ break;
default:
break;
}
@@ -694,6 +713,7 @@ static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
+ unsigned int val;
for (i = 0; i < IMX_MU_CHANS; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
@@ -715,6 +735,14 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ /* Clear any pending GIP */
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
+ imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
+
+ /* Clear any pending RSR */
+ for (i = 0; i < IMX_MU_NUM_RR; i++)
+ imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
static void imx_mu_init_specific(struct imx_mu_priv *priv)
@@ -865,7 +893,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
@@ -888,7 +916,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
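
The new RST channel is a small busy-wait handshake: write the reset-request bit, then poll the status register until the hardware deasserts its RST flag, with readl_poll_timeout() bounding the wait (roughly 1 µs poll interval, 5 µs budget above). A sketch with placeholder offsets and bits, not the real MU register map:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static int demo_mu_reset(void __iomem *base)
{
	u32 sr;

	/* request the reset (0x24/BIT(5) are placeholders) */
	writel(readl(base + 0x24) | BIT(5), base + 0x24);

	/* returns -ETIMEDOUT if BIT(7) of 0x20 never clears */
	return readl_poll_timeout(base + 0x20, sr, !(sr & BIT(7)), 1, 5);
}
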
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 2578e5aaa935..9465f9081515 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -192,15 +192,10 @@ static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
- struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
data.sta = sta;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
@@ -448,7 +443,6 @@ done:
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
- struct cmdq_task_cb *cb;
struct cmdq_cb_data data;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task, *tmp;
@@ -465,13 +459,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
- cb = &task->pkt->async_cb;
data.sta = -ECONNABORTED;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
kfree(task);
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index cf3e8096942a..529c9d04e9a4 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -29,7 +29,7 @@ config BCACHE_CLOSURES_DEBUG
operations that get stuck.
config BCACHE_ASYNC_REGISTRATION
- bool "Asynchronous device registration (EXPERIMENTAL)"
+ bool "Asynchronous device registration"
depends on BCACHE
help
Add a sysfs file /sys/fs/bcache/register_async. Writing registering
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index e136d6edc1ed..147c493a989a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -812,7 +812,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
- if (register_shrinker(&c->shrink))
+ if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
pr_warn("bcache: %s: could not register shrinker\n",
__func__);
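
register_shrinker() grew a printf-style name as part of the shrinker debugfs work merged for 6.0; the name built here from the cache-set UUID identifies the instance under /sys/kernel/debug/shrinker/. Minimal shape of the new call with stub callbacks:

#include <linux/shrinker.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;	/* nothing reclaimable in this demo */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_count,
	.scan_objects = demo_scan,
	.seeks = DEFAULT_SEEKS,
};

static int demo_register(unsigned int instance)
{
	return register_shrinker(&demo_shrinker, "demo-cache:%u", instance);
}
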
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index dc01ce33265b..09c7ed2650ca 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "bufio"
@@ -81,6 +82,8 @@
*/
struct dm_bufio_client {
struct mutex lock;
+ spinlock_t spinlock;
+ bool no_sleep;
struct list_head lru[LIST_SIZE];
unsigned long n_buffers[LIST_SIZE];
@@ -90,7 +93,6 @@ struct dm_bufio_client {
s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *);
-
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
@@ -161,23 +163,34 @@ struct dm_buffer {
#endif
};
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
/*----------------------------------------------------------------*/
#define dm_bufio_in_request() (!!current->bio_list)
static void dm_bufio_lock(struct dm_bufio_client *c)
{
- mutex_lock_nested(&c->lock, dm_bufio_in_request());
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_lock_bh(&c->spinlock);
+ else
+ mutex_lock_nested(&c->lock, dm_bufio_in_request());
}
static int dm_bufio_trylock(struct dm_bufio_client *c)
{
- return mutex_trylock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return spin_trylock_bh(&c->spinlock);
+ else
+ return mutex_trylock(&c->lock);
}
static void dm_bufio_unlock(struct dm_bufio_client *c)
{
- mutex_unlock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_unlock_bh(&c->spinlock);
+ else
+ mutex_unlock(&c->lock);
}
/*----------------------------------------------------------------*/
@@ -802,6 +815,10 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_WRITING, &b->state));
BUG_ON(test_bit(B_DIRTY, &b->state));
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+ unlikely(test_bit(B_READING, &b->state)))
+ continue;
+
if (!b->hold_count) {
__make_buffer_clean(b);
__unlink_buffer(b);
@@ -810,6 +827,9 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
cond_resched();
}
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return NULL;
+
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
BUG_ON(test_bit(B_READING, &b->state));
@@ -1617,7 +1637,8 @@ static void drop_buffers(struct dm_bufio_client *c)
*/
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
- if (!(gfp & __GFP_FS)) {
+ if (!(gfp & __GFP_FS) ||
+ (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
@@ -1715,7 +1736,8 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *))
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags)
{
int r;
struct dm_bufio_client *c;
@@ -1745,12 +1767,18 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->alloc_callback = alloc_callback;
c->write_callback = write_callback;
+ if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
+ c->no_sleep = true;
+ static_branch_inc(&no_sleep_enabled);
+ }
+
for (i = 0; i < LIST_SIZE; i++) {
INIT_LIST_HEAD(&c->lru[i]);
c->n_buffers[i] = 0;
}
mutex_init(&c->lock);
+ spin_lock_init(&c->spinlock);
INIT_LIST_HEAD(&c->reserved_buffers);
c->need_reserved_buffers = reserved_buffers;
@@ -1804,7 +1832,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker);
+ r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
@@ -1876,6 +1905,8 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
kmem_cache_destroy(c->slab_buffer);
dm_io_client_destroy(c->dm_io);
mutex_destroy(&c->lock);
+ if (c->no_sleep)
+ static_branch_dec(&no_sleep_enabled);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
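
This dm-bufio change is the enabler for the verity tasklet work further down: a client created with DM_BUFIO_CLIENT_NO_SLEEP takes a BH-safe spinlock instead of the mutex, and the static key keeps the extra test patched out of every sleepable user until the first such client exists. The locking pattern, reduced:

#include <linux/jump_label.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_STATIC_KEY_FALSE(demo_no_sleep_enabled);

struct demo_client {
	struct mutex lock;
	spinlock_t spinlock;
	bool no_sleep;
};

static void demo_lock(struct demo_client *c)
{
	/* straight-line mutex_lock() until the key is incremented by
	 * the first no_sleep client (static_branch_inc above) */
	if (static_branch_unlikely(&demo_no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock(&c->lock);
}
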
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 223e8e1a7a13..512cc6cea095 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -313,7 +313,8 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
+ ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1,
+ 0, NULL, NULL, 0);
if (IS_ERR(ec->bufio)) {
ti->error = "Cannot create dm bufio client";
r = PTR_ERR(ec->bufio);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c60f9b2ece2d..aaf2472df6e5 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4439,7 +4439,7 @@ try_smaller_buffer:
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
- 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
if (IS_ERR(ic->bufio)) {
r = PTR_ERR(ic->bufio);
ti->error = "Cannot initialize dm-bufio";
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1ec17c32867f..c640be453313 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3728,6 +3728,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
}
} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index f46f930eedf9..680cc05ec654 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -493,7 +493,7 @@ static int read_exceptions(struct pstore *ps,
client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
ps->store->chunk_size << SECTOR_SHIFT,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(client))
return PTR_ERR(client);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index cea2b3789736..23cffce56403 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -749,7 +749,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->bufio = dm_bufio_client_create(f->dev->bdev,
f->io_size,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
return PTR_ERR(f->bufio);
@@ -765,7 +765,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
1 << v->data_dev_block_bits,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->data_bufio)) {
ti->error = "Cannot initialize FEC data bufio client";
return PTR_ERR(f->data_bufio);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 4fd853a56b1a..94b6cb599db4 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -20,6 +20,7 @@
#include <linux/reboot.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "verity"
@@ -35,14 +36,17 @@
#define DM_VERITY_OPT_PANIC "panic_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
+#define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
-#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
+
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
@@ -221,7 +225,7 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
struct mapped_device *md = dm_table_get_md(v->ti->table);
/* Corruption should be visible in device status in all modes */
- v->hash_failed = 1;
+ v->hash_failed = true;
if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
goto out;
@@ -287,7 +291,19 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
verity_hash_at_level(v, block, level, &hash_block, &offset);
- data = dm_bufio_read(v->bufio, hash_block, &buf);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ data = dm_bufio_get(v->bufio, hash_block, &buf);
+ if (data == NULL) {
+ /*
+ * Running in a tasklet and the hash was not in the bufio cache.
+ * Return early and resume execution from a work-queue
+ * to read the hash from disk.
+ */
+ return -EAGAIN;
+ }
+ } else
+ data = dm_bufio_read(v->bufio, hash_block, &buf);
+
if (IS_ERR(data))
return PTR_ERR(data);
@@ -308,6 +324,15 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
+ else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fall back to the work-queue.
+ */
+ r = -EAGAIN;
+ goto release_ret_r;
+ }
else if (verity_fec_decode(v, io,
DM_VERITY_BLOCK_TYPE_METADATA,
hash_block, data, NULL) == 0)
@@ -474,10 +499,24 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
+#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
- unsigned b;
+#endif
+ struct bvec_iter iter_copy;
+ struct bvec_iter *iter;
struct crypto_wait wait;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+ unsigned int b;
+
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ /*
+ * Copy the iterator in case we need to restart
+ * verification in a work-queue.
+ */
+ iter_copy = io->iter;
+ iter = &iter_copy;
+ } else
+ iter = &io->iter;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -486,7 +525,7 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks &&
likely(test_bit(cur_block, v->validated_blocks))) {
- verity_bv_skip_block(v, io, &io->iter);
+ verity_bv_skip_block(v, io, iter);
continue;
}
@@ -501,7 +540,7 @@ static int verity_verify_io(struct dm_verity_io *io)
* If we expect a zero block, don't validate, just
* return zeros.
*/
- r = verity_for_bv_block(v, io, &io->iter,
+ r = verity_for_bv_block(v, io, iter,
verity_bv_zero);
if (unlikely(r < 0))
return r;
@@ -513,8 +552,11 @@ static int verity_verify_io(struct dm_verity_io *io)
if (unlikely(r < 0))
return r;
- start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &wait);
+#if defined(CONFIG_DM_VERITY_FEC)
+ if (verity_fec_is_enabled(v))
+ start = *iter;
+#endif
+ r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -528,9 +570,18 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
+ } else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fall back to the work-queue.
+ */
+ return -EAGAIN;
+#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
- cur_block, NULL, &start) == 0) {
+ cur_block, NULL, &start) == 0) {
continue;
+#endif
} else {
if (bio->bi_status) {
/*
@@ -567,7 +618,8 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_status = status;
- verity_fec_finish_io(io);
+ if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
+ verity_fec_finish_io(io);
bio_endio(bio);
}
@@ -576,9 +628,29 @@ static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
+ io->in_tasklet = false;
+
+ verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
+static void verity_tasklet(unsigned long data)
+{
+ struct dm_verity_io *io = (struct dm_verity_io *)data;
+ int err;
+
+ io->in_tasklet = true;
+ err = verity_verify_io(io);
+ if (err == -EAGAIN) {
+ /* fall back to retrying on the work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ return;
+ }
+
+ verity_finish_io(io, errno_to_blk_status(err));
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -589,8 +661,13 @@ static void verity_end_io(struct bio *bio)
return;
}
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
+ tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
+ tasklet_schedule(&io->tasklet);
+ } else {
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ }
}
/*
@@ -701,8 +778,6 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
- verity_fec_init_io(io);
-
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);
@@ -752,6 +827,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
args++;
if (v->validated_blocks)
args++;
+ if (v->use_tasklet)
+ args++;
if (v->signature_key_desc)
args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
if (!args)
@@ -777,6 +854,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
+ if (v->use_tasklet)
+ DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
sz = verity_fec_status_table(v, sz, result, maxlen);
if (v->signature_key_desc)
DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
@@ -890,6 +969,9 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->signature_key_desc);
+ if (v->use_tasklet)
+ static_branch_dec(&use_tasklet_enabled);
+
kfree(v);
}
@@ -968,9 +1050,10 @@ static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
}
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
- struct dm_verity_sig_opts *verify_args)
+ struct dm_verity_sig_opts *verify_args,
+ bool only_modifier_opts)
{
- int r;
+ int r = 0;
unsigned argc;
struct dm_target *ti = v->ti;
const char *arg_name;
@@ -991,6 +1074,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
argc--;
if (verity_is_verity_mode(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_parse_verity_mode(v, arg_name);
if (r) {
ti->error = "Conflicting error handling parameters";
@@ -999,6 +1084,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_zero_digest(v);
if (r) {
ti->error = "Cannot allocate zero digest";
@@ -1007,17 +1094,29 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_most_once(v);
if (r)
return r;
continue;
+ } else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
+ v->use_tasklet = true;
+ static_branch_inc(&use_tasklet_enabled);
+ continue;
+
} else if (verity_is_fec_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
return r;
continue;
+
} else if (verity_verify_is_sig_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_verify_sig_parse_opt_args(as, v,
verify_args,
&argc, arg_name);
@@ -1025,8 +1124,17 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
return r;
continue;
+ } else if (only_modifier_opts) {
+ /*
+ * Ignore unrecognized opt, could easily be an extra
+ * argument to an option whose parsing was skipped.
+ * Normal parsing (@only_modifier_opts=false) will
+ * properly parse all options (and their extra args).
+ */
+ continue;
}
+ DMERR("Unrecognized verity feature request: %s", arg_name);
ti->error = "Unrecognized verity feature request";
return -EINVAL;
} while (argc && !r);
@@ -1054,6 +1162,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct dm_verity_sig_opts verify_args = {0};
struct dm_arg_set as;
unsigned int num;
+ unsigned int wq_flags;
unsigned long long num_ll;
int r;
int i;
@@ -1085,6 +1194,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ /* Parse optional parameters that modify primary args */
+ if (argc > 10) {
+ as.argc = argc - 10;
+ as.argv = argv + 10;
+ r = verity_parse_opt_args(&as, v, &verify_args, true);
+ if (r < 0)
+ goto bad;
+ }
+
if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
num > 1) {
ti->error = "Invalid version";
@@ -1156,7 +1274,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- v->tfm = crypto_alloc_ahash(v->alg_name, 0, 0);
+ v->tfm = crypto_alloc_ahash(v->alg_name, 0,
+ v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
@@ -1218,8 +1337,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (argc) {
as.argc = argc;
as.argv = argv;
-
- r = verity_parse_opt_args(&as, v, &verify_args);
+ r = verity_parse_opt_args(&as, v, &verify_args, false);
if (r < 0)
goto bad;
}
@@ -1266,7 +1384,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
- dm_bufio_alloc_callback, NULL);
+ dm_bufio_alloc_callback, NULL,
+ v->use_tasklet ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
@@ -1281,7 +1400,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
- v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
+ wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
+ if (v->use_tasklet) {
+ /*
+ * Allow verify_wq to preempt softirq since verification in
+ * tasklet will fall-back to using it for error handling
+ * (or if the bufio cache doesn't have required hashes).
+ */
+ wq_flags |= WQ_HIGHPRI;
+ }
+ v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
@@ -1343,7 +1471,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
- .version = {1, 8, 1},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
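
The control flow added in this file is worth restating in isolation: verification runs first in the tasklet (softirq context, no sleeping, bufio hits come only from cache), and every path that would need disk I/O or FEC recovery returns -EAGAIN so the bio is retried on the work-queue. A self-contained sketch of that two-stage dispatch, all demo_* names hypothetical:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_io {
	struct tasklet_struct tasklet;
	struct work_struct work;
	bool in_tasklet;
};

static struct workqueue_struct *demo_wq;	/* assumed allocated at init */

static int demo_verify(struct demo_io *io)
{
	/* stand-in for the real check: pretend the fast path always
	 * needs something that can sleep */
	return io->in_tasklet ? -EAGAIN : 0;
}

static void demo_work_fn(struct work_struct *w)
{
	struct demo_io *io = container_of(w, struct demo_io, work);

	io->in_tasklet = false;
	demo_verify(io);	/* process context: sleeping is fine */
}

static void demo_tasklet_fn(unsigned long data)
{
	struct demo_io *io = (struct demo_io *)data;

	io->in_tasklet = true;
	if (demo_verify(io) == -EAGAIN) {	/* fast path missed */
		INIT_WORK(&io->work, demo_work_fn);
		queue_work(demo_wq, &io->work);	/* retry where we can sleep */
	}
}
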
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index c832cc3e3d24..45455de1b4bc 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -13,6 +13,7 @@
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
+#include <linux/interrupt.h>
#include <crypto/hash.h>
#define DM_VERITY_MAX_LEVELS 63
@@ -51,9 +52,10 @@ struct dm_verity {
unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
unsigned char levels; /* the number of tree levels */
unsigned char version;
+ bool hash_failed:1; /* set if hash of any block failed */
+ bool use_tasklet:1; /* try to verify in tasklet before work-queue */
unsigned digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
- int hash_failed; /* set to 1 if hash of any block failed */
enum verity_mode mode; /* mode for handling verification errors */
unsigned corrupted_errs;/* Number of errors for corrupted blocks */
@@ -76,10 +78,12 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
+ bool in_tasklet;
struct bvec_iter iter;
struct work_struct work;
+ struct tasklet_struct tasklet;
/*
* Three variably-size fields follow this struct:
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 1fc161d65673..96a003eb7323 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1594,7 +1594,8 @@ done:
default:
BUG();
- return -1;
+ wc_unlock(wc);
+ return DM_MAPIO_KILL;
}
}
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 34db364c23a8..0278482fac94 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2945,7 +2945,9 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
/* Metadata cache shrinker */
- ret = register_shrinker(&zmd->mblk_shrinker);
+ ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
+ MAJOR(dev->bdev->bd_dev),
+ MINOR(dev->bdev->bd_dev));
if (ret) {
dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
goto err;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 99642f69bfa7..60549b65c799 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -752,7 +752,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
return 0;
}
@@ -1016,7 +1016,7 @@ static void dm_wq_requeue_work(struct work_struct *work)
while (io) {
struct dm_io *next = io->next;
- dm_io_rewind(io, &md->queue->bio_split);
+ dm_io_rewind(io, &md->disk->bio_split);
io->next = NULL;
__dm_io_complete(io, false);
@@ -1181,7 +1181,7 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
* Does the target need to split IO even further?
* - varied (per target) IO splitting is a tenet of DM; this
* explains why stacked chunk_sectors based splitting via
- * blk_queue_split() isn't possible here.
+ * bio_split_to_limits() isn't possible here.
*/
if (!ti->max_io_len)
return len;
@@ -1751,10 +1751,10 @@ static void dm_split_and_process_bio(struct mapped_device *md,
is_abnormal = is_abnormal_io(bio);
if (unlikely(is_abnormal)) {
/*
- * Use blk_queue_split() for abnormal IO (e.g. discard, etc)
+ * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
* otherwise associated queue_limits won't be imposed.
*/
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
}
init_clone_info(&ci, md, map, bio, is_abnormal);
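The blk_queue_split() conversion changes the calling convention: the old helper updated the bio through a pointer argument, while bio_split_to_limits() returns the bio the caller should continue with. A hedged sketch; whether NULL can come back here has varied across kernel versions, so the defensive check is an assumption:

	static void example_submit_bio(struct bio *bio)
	{
		bio = bio_split_to_limits(bio);
		if (!bio)	/* assumed: split consumed or failed the bio */
			return;

		/* ...map and dispatch as before... */
	}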
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 2cf973722f59..91836e6de326 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -125,7 +125,6 @@ static void __init md_setup_drive(struct md_setup_args *args)
char *devname = args->device_names;
dev_t devices[MD_SB_DISKS + 1], mdev;
struct mdu_array_info_s ainfo = { };
- struct block_device *bdev;
struct mddev *mddev;
int err = 0, i;
char name[16];
@@ -169,24 +168,16 @@ static void __init md_setup_drive(struct md_setup_args *args)
pr_info("md: Loading %s: %s\n", name, args->device_names);
- bdev = blkdev_get_by_dev(mdev, FMODE_READ, NULL);
- if (IS_ERR(bdev)) {
- pr_err("md: open failed - cannot start array %s\n", name);
+ mddev = md_alloc(mdev, name);
+ if (IS_ERR(mddev)) {
+ pr_err("md: md_alloc failed - cannot start array %s\n", name);
return;
}
- err = -EIO;
- if (WARN(bdev->bd_disk->fops != &md_fops,
- "Opening block device %x resulted in non-md device\n",
- mdev))
- goto out_blkdev_put;
-
- mddev = bdev->bd_disk->private_data;
-
err = mddev_lock(mddev);
if (err) {
pr_err("md: failed to lock array %s\n", name);
- goto out_blkdev_put;
+ goto out_mddev_put;
}
if (!list_empty(&mddev->disks) || mddev->raid_disks) {
@@ -230,8 +221,8 @@ static void __init md_setup_drive(struct md_setup_args *args)
pr_warn("md: starting %s failed\n", name);
out_unlock:
mddev_unlock(mddev);
-out_blkdev_put:
- blkdev_put(bdev, FMODE_READ);
+out_mddev_put:
+ mddev_put(mddev);
}
static int __init raid_setup(char *str)
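The md-autodetect conversion above shows the new ownership contract: md_alloc() returns the mddev with a reference held, instead of forcing callers through blkdev_get_by_dev() and a fops check, and every path must end in mddev_put(). In sketch form:

	struct mddev *mddev = md_alloc(mdev, name);

	if (IS_ERR(mddev))
		return PTR_ERR(mddev);

	/* ...configure and start the array under mddev_lock()... */

	mddev_put(mddev);	/* drop the reference md_alloc() returned */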
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 37cbcce3cc66..742b2349fea3 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -40,7 +40,7 @@ struct resync_info {
/* Lock the send communication. This is done through
* bit manipulation as opposed to a mutex in order to
- * accomodate lock and hold. See next comment.
+ * accommodate lock and hold. See next comment.
*/
#define MD_CLUSTER_SEND_LOCK 4
/* If cluster operations (such as adding a disk) must lock the
@@ -689,7 +689,7 @@ static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
/*
* If resync thread run after raid1d thread, then process_metadata_update
* could not continue if raid1d held reconfig_mutex (and raid1d is blocked
- * since another node already got EX on Token and waitting the EX of Ack),
+ * since another node already got EX on Token and waiting the EX of Ack),
* so let resync wake up thread in case flag is set.
*/
if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
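The "lock and hold" comment describes a bit-based lock: unlike a mutex, a state bit can be taken in one context and released from another, for example from a completion handler. A distilled sketch with hypothetical names:

	#define EXAMPLE_SEND_LOCK	4

	static void example_lock_send(struct example_info *info)
	{
		wait_event(info->wait,
			   !test_and_set_bit(EXAMPLE_SEND_LOCK, &info->state));
	}

	static void example_unlock_send(struct example_info *info)
	{
		clear_bit(EXAMPLE_SEND_LOCK, &info->state);
		wake_up(&info->wait);
	}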
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4df78e30b76a..729be2c5296c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -368,28 +368,6 @@ EXPORT_SYMBOL_GPL(md_new_event);
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
-/*
- * iterates through all used mddevs in the system.
- * We take care to grab the all_mddevs_lock whenever navigating
- * the list, and to always hold a refcount when unlocked.
- * Any code which breaks out of this loop while own
- * a reference to the current mddev and must mddev_put it.
- */
-#define for_each_mddev(_mddev,_tmp) \
- \
- for (({ spin_lock(&all_mddevs_lock); \
- _tmp = all_mddevs.next; \
- _mddev = NULL;}); \
- ({ if (_tmp != &all_mddevs) \
- mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
- spin_unlock(&all_mddevs_lock); \
- if (_mddev) mddev_put(_mddev); \
- _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
- _tmp != &all_mddevs;}); \
- ({ spin_lock(&all_mddevs_lock); \
- _tmp = _tmp->next;}) \
- )
-
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
@@ -464,7 +442,7 @@ static void md_submit_bio(struct bio *bio)
return;
}
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0)
@@ -647,13 +625,17 @@ EXPORT_SYMBOL(md_flush_request);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
+ lockdep_assert_held(&all_mddevs_lock);
+
+ if (test_bit(MD_DELETED, &mddev->flags))
+ return NULL;
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
-static void mddev_put(struct mddev *mddev)
+void mddev_put(struct mddev *mddev)
{
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
@@ -661,7 +643,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del_init(&mddev->all_mddevs);
+ set_bit(MD_DELETED, &mddev->flags);
/*
* Call queue_work inside the spinlock so that
@@ -678,7 +660,6 @@ static void md_safemode_timeout(struct timer_list *t);
void mddev_init(struct mddev *mddev)
{
- kobject_init(&mddev->kobj, &md_ktype);
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
@@ -733,22 +714,6 @@ static dev_t mddev_alloc_unit(void)
return dev;
}
-static struct mddev *mddev_find(dev_t unit)
-{
- struct mddev *mddev;
-
- if (MAJOR(unit) != MD_MAJOR)
- unit &= ~((1 << MdpMinorShift) - 1);
-
- spin_lock(&all_mddevs_lock);
- mddev = mddev_find_locked(unit);
- if (mddev)
- mddev_get(mddev);
- spin_unlock(&all_mddevs_lock);
-
- return mddev;
-}
-
static struct mddev *mddev_alloc(dev_t unit)
{
struct mddev *new;
@@ -791,6 +756,15 @@ out_free_new:
return ERR_PTR(error);
}
+static void mddev_free(struct mddev *mddev)
+{
+ spin_lock(&all_mddevs_lock);
+ list_del(&mddev->all_mddevs);
+ spin_unlock(&all_mddevs_lock);
+
+ kfree(mddev);
+}
+
static const struct attribute_group md_redundancy_group;
void mddev_unlock(struct mddev *mddev)
@@ -3335,14 +3309,35 @@ rdev_size_show(struct md_rdev *rdev, char *page)
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
-static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
+static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
{
/* check if two start/length pairs overlap */
- if (s1+l1 <= s2)
- return 0;
- if (s2+l2 <= s1)
- return 0;
- return 1;
+ if (a->data_offset + a->sectors <= b->data_offset)
+ return false;
+ if (b->data_offset + b->sectors <= a->data_offset)
+ return false;
+ return true;
+}
+
+static bool md_rdev_overlaps(struct md_rdev *rdev)
+{
+ struct mddev *mddev;
+ struct md_rdev *rdev2;
+
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ if (test_bit(MD_DELETED, &mddev->flags))
+ continue;
+ rdev_for_each(rdev2, mddev) {
+ if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
+ md_rdevs_overlap(rdev, rdev2)) {
+ spin_unlock(&all_mddevs_lock);
+ return true;
+ }
+ }
+ }
+ spin_unlock(&all_mddevs_lock);
+ return false;
}
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
@@ -3394,46 +3389,21 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
- if (sectors > oldsectors && my_mddev->external) {
- /* Need to check that all other rdevs with the same
- * ->bdev do not overlap. 'rcu' is sufficient to walk
- * the rdev lists safely.
- * This check does not provide a hard guarantee, it
- * just helps avoid dangerous mistakes.
- */
- struct mddev *mddev;
- int overlap = 0;
- struct list_head *tmp;
-
- rcu_read_lock();
- for_each_mddev(mddev, tmp) {
- struct md_rdev *rdev2;
- rdev_for_each(rdev2, mddev)
- if (rdev->bdev == rdev2->bdev &&
- rdev != rdev2 &&
- overlaps(rdev->data_offset, rdev->sectors,
- rdev2->data_offset,
- rdev2->sectors)) {
- overlap = 1;
- break;
- }
- if (overlap) {
- mddev_put(mddev);
- break;
- }
- }
- rcu_read_unlock();
- if (overlap) {
- /* Someone else could have slipped in a size
- * change here, but doing so is just silly.
- * We put oldsectors back because we *know* it is
- * safe, and trust userspace not to race with
- * itself
- */
- rdev->sectors = oldsectors;
- return -EBUSY;
- }
+ /*
+ * Check that all other rdevs with the same bdev do not overlap. This
+ * check does not provide a hard guarantee, it just helps avoid
+ * dangerous mistakes.
+ */
+ if (sectors > oldsectors && my_mddev->external &&
+ md_rdev_overlaps(rdev)) {
+ /*
+ * Someone else could have slipped in a size change here, but
+ * doing so is just silly. We put oldsectors back because we
+ * know it is safe, and trust userspace not to race with itself.
+ */
+ rdev->sectors = oldsectors;
+ return -EBUSY;
}
return len;
}
@@ -4830,6 +4800,19 @@ action_store(struct mddev *mddev, const char *page, size_t len)
if (work_pending(&mddev->del_work))
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
+ sector_t save_rp = mddev->reshape_position;
+
+ mddev_unlock(mddev);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
+ mddev_lock_nointr(mddev);
+ /*
+ * set RECOVERY_INTR again and restore the reshape
+ * position in case others changed them while the
+ * lock was dropped, e.g., reshape_position_store
+ * and md_check_recovery.
+ */
+ mddev->reshape_position = save_rp;
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
@@ -5001,7 +4984,7 @@ static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
- if (mddev->curr_resync == 0)
+ if (mddev->curr_resync == MD_RESYNC_NONE)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
@@ -5020,8 +5003,8 @@ sync_completed_show(struct mddev *mddev, char *page)
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
- if (mddev->curr_resync == 1 ||
- mddev->curr_resync == 2)
+ if (mddev->curr_resync == MD_RESYNC_YIELDED ||
+ mddev->curr_resync == MD_RESYNC_DELAYED)
return sprintf(page, "delayed\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -5532,11 +5515,10 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
spin_lock(&all_mddevs_lock);
- if (list_empty(&mddev->all_mddevs)) {
+ if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
- mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->show(mddev, page);
@@ -5557,18 +5539,17 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&all_mddevs_lock);
- if (list_empty(&mddev->all_mddevs)) {
+ if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
- mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->store(mddev, page, length);
mddev_put(mddev);
return rv;
}
-static void md_free(struct kobject *ko)
+static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
@@ -5577,15 +5558,8 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_level)
sysfs_put(mddev->sysfs_level);
- if (mddev->gendisk) {
- del_gendisk(mddev->gendisk);
- put_disk(mddev->gendisk);
- }
- percpu_ref_exit(&mddev->writes_pending);
-
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
- kfree(mddev);
+ del_gendisk(mddev->gendisk);
+ put_disk(mddev->gendisk);
}
static const struct sysfs_ops md_sysfs_ops = {
@@ -5593,7 +5567,7 @@ static const struct sysfs_ops md_sysfs_ops = {
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
- .release = md_free,
+ .release = md_kobj_release,
.sysfs_ops = &md_sysfs_ops,
.default_groups = md_attr_groups,
};
@@ -5604,7 +5578,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -5623,7 +5596,7 @@ int mddev_init_writes_pending(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
-static int md_alloc(dev_t dev, char *name)
+struct mddev *md_alloc(dev_t dev, char *name)
{
/*
* If dev is zero, name is the name of a device to allocate with
@@ -5647,12 +5620,13 @@ static int md_alloc(dev_t dev, char *name)
* removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
+ flush_workqueue(md_rdev_misc_wq);
mutex_lock(&disks_mutex);
mddev = mddev_alloc(dev);
if (IS_ERR(mddev)) {
- mutex_unlock(&disks_mutex);
- return PTR_ERR(mddev);
+ error = PTR_ERR(mddev);
+ goto out_unlock;
}
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
@@ -5670,7 +5644,7 @@ static int md_alloc(dev_t dev, char *name)
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
error = -EEXIST;
- goto out_unlock_disks_mutex;
+ goto out_free_mddev;
}
spin_unlock(&all_mddevs_lock);
}
@@ -5683,7 +5657,7 @@ static int md_alloc(dev_t dev, char *name)
error = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- goto out_unlock_disks_mutex;
+ goto out_free_mddev;
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5704,25 +5678,45 @@ static int md_alloc(dev_t dev, char *name)
mddev->gendisk = disk;
error = add_disk(disk);
if (error)
- goto out_cleanup_disk;
+ goto out_put_disk;
+ kobject_init(&mddev->kobj, &md_ktype);
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
- if (error)
- goto out_del_gendisk;
+ if (error) {
+ /*
+ * The disk is already live at this point. Clear the hold flag
+ * and let mddev_put take care of the deletion, as it isn't any
+ * different from a normal close on last release now.
+ */
+ mddev->hold_active = 0;
+ mutex_unlock(&disks_mutex);
+ mddev_put(mddev);
+ return ERR_PTR(error);
+ }
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
- goto out_unlock_disks_mutex;
+ mutex_unlock(&disks_mutex);
+ return mddev;
-out_del_gendisk:
- del_gendisk(disk);
-out_cleanup_disk:
+out_put_disk:
put_disk(disk);
-out_unlock_disks_mutex:
+out_free_mddev:
+ mddev_free(mddev);
+out_unlock:
mutex_unlock(&disks_mutex);
+ return ERR_PTR(error);
+}
+
+static int md_alloc_and_put(dev_t dev, char *name)
+{
+ struct mddev *mddev = md_alloc(dev, name);
+
+ if (IS_ERR(mddev))
+ return PTR_ERR(mddev);
mddev_put(mddev);
- return error;
+ return 0;
}
static void md_probe(dev_t dev)
@@ -5730,7 +5724,7 @@ static void md_probe(dev_t dev)
if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
return;
if (create_on_open)
- md_alloc(dev, NULL);
+ md_alloc_and_put(dev, NULL);
}
static int add_named_array(const char *val, const struct kernel_param *kp)
@@ -5752,12 +5746,12 @@ static int add_named_array(const char *val, const struct kernel_param *kp)
return -E2BIG;
strscpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) == 0)
- return md_alloc(0, buf);
+ return md_alloc_and_put(0, buf);
if (strncmp(buf, "md", 2) == 0 &&
isdigit(buf[2]) &&
kstrtoul(buf+2, 10, &devnum) == 0 &&
devnum <= MINORMASK)
- return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
+ return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
return -EINVAL;
}
@@ -6197,6 +6191,7 @@ static void __md_stop_writes(struct mddev *mddev)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
}
@@ -6266,6 +6261,7 @@ void md_stop(struct mddev *mddev)
/* stop the array and free an attached data structures.
* This is called from dm-raid
*/
+ __md_stop_writes(mddev);
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -6497,9 +6493,8 @@ static void autorun_devices(int part)
break;
}
- md_probe(dev);
- mddev = mddev_find(dev);
- if (!mddev)
+ mddev = md_alloc(dev, NULL);
+ if (IS_ERR(mddev))
break;
if (mddev_lock(mddev))
@@ -7782,45 +7777,33 @@ out_unlock:
static int md_open(struct block_device *bdev, fmode_t mode)
{
- /*
- * Succeed if we can lock the mddev, which confirms that
- * it isn't being stopped right now.
- */
- struct mddev *mddev = mddev_find(bdev->bd_dev);
+ struct mddev *mddev;
int err;
+ spin_lock(&all_mddevs_lock);
+ mddev = mddev_get(bdev->bd_disk->private_data);
+ spin_unlock(&all_mddevs_lock);
if (!mddev)
return -ENODEV;
- if (mddev->gendisk != bdev->bd_disk) {
- /* we are racing with mddev_put which is discarding this
- * bd_disk.
- */
- mddev_put(mddev);
- /* Wait until bdev->bd_disk is definitely gone */
- if (work_pending(&mddev->del_work))
- flush_workqueue(md_misc_wq);
- return -EBUSY;
- }
- BUG_ON(mddev != bdev->bd_disk->private_data);
-
- if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
+ err = mutex_lock_interruptible(&mddev->open_mutex);
+ if (err)
goto out;
- if (test_bit(MD_CLOSING, &mddev->flags)) {
- mutex_unlock(&mddev->open_mutex);
- err = -ENODEV;
- goto out;
- }
+ err = -ENODEV;
+ if (test_bit(MD_CLOSING, &mddev->flags))
+ goto out_unlock;
- err = 0;
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
bdev_check_media_change(bdev);
- out:
- if (err)
- mddev_put(mddev);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&mddev->open_mutex);
+out:
+ mddev_put(mddev);
return err;
}
@@ -7844,6 +7827,17 @@ static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
return ret;
}
+static void md_free_disk(struct gendisk *disk)
+{
+ struct mddev *mddev = disk->private_data;
+
+ percpu_ref_exit(&mddev->writes_pending);
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
+
+ mddev_free(mddev);
+}
+
const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -7857,6 +7851,7 @@ const struct block_device_operations md_fops =
.getgeo = md_getgeo,
.check_events = md_check_events,
.set_read_only = md_set_read_only,
+ .free_disk = md_free_disk,
};
static int md_thread(void *arg)
@@ -8018,16 +8013,26 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync;
- if (resync <= 3) {
+ if (resync < MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
/* Still cleaning up */
resync = max_sectors;
- } else if (resync > max_sectors)
+ } else if (resync > max_sectors) {
resync = max_sectors;
- else
+ } else {
resync -= atomic_read(&mddev->recovery_active);
+ if (resync < MD_RESYNC_ACTIVE) {
+ /*
+ * Resync has started, but the subtraction has
+ * yielded one of the special values. Force it
+ * to active to ensure the status reports an
+ * active resync.
+ */
+ resync = MD_RESYNC_ACTIVE;
+ }
+ }
- if (resync == 0) {
+ if (resync == MD_RESYNC_NONE) {
if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
struct md_rdev *rdev;
@@ -8051,7 +8056,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
}
return 0;
}
- if (resync < 3) {
+ if (resync < MD_RESYNC_ACTIVE) {
seq_printf(seq, "\tresync=DELAYED");
return 1;
}
@@ -8152,6 +8157,8 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
- mddev_get(mddev);
+ if (!mddev_get(mddev))
+ continue;
spin_unlock(&all_mddevs_lock);
return mddev;
}
@@ -8165,25 +8172,35 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
+ struct mddev *to_put = NULL;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
- if (v == (void*)1)
+ if (v == (void*)1) {
tmp = all_mddevs.next;
- else
+ } else {
+ to_put = mddev;
+ tmp = mddev->all_mddevs.next;
+ }
+
+ for (;;) {
+ if (tmp == &all_mddevs) {
+ next_mddev = (void*)2;
+ *pos = 0x10000;
+ break;
+ }
+ next_mddev = list_entry(tmp, struct mddev, all_mddevs);
+ if (mddev_get(next_mddev))
+ break;
+ mddev = next_mddev;
tmp = mddev->all_mddevs.next;
- if (tmp != &all_mddevs)
- next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
- else {
- next_mddev = (void*)2;
- *pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
- if (v != (void*)1)
+ if (to_put)
mddev_put(mddev);
return next_mddev;
@@ -8682,7 +8699,6 @@ void md_do_sync(struct md_thread *thread)
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
- struct list_head *tmp;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
@@ -8729,13 +8745,7 @@ void md_do_sync(struct md_thread *thread)
mddev->last_sync_action = action ?: desc;
- /* we overload curr_resync somewhat here.
- * 0 == not engaged in resync at all
- * 2 == checking that there is no conflict with another sync
- * 1 == like 2, but have yielded to allow conflicting resync to
- * commence
- * other == active in resync - this many blocks
- *
+ /*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
@@ -8747,24 +8757,29 @@ void md_do_sync(struct md_thread *thread)
do {
int mddev2_minor = -1;
- mddev->curr_resync = 2;
+ mddev->curr_resync = MD_RESYNC_DELAYED;
try_again:
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
- for_each_mddev(mddev2, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
+ if (test_bit(MD_DELETED, &mddev2->flags))
+ continue;
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
- if (mddev < mddev2 && mddev->curr_resync == 2) {
+ if (mddev < mddev2 &&
+ mddev->curr_resync == MD_RESYNC_DELAYED) {
/* arbitrarily yield */
- mddev->curr_resync = 1;
+ mddev->curr_resync = MD_RESYNC_YIELDED;
wake_up(&resync_wait);
}
- if (mddev > mddev2 && mddev->curr_resync == 1)
+ if (mddev > mddev2 &&
+ mddev->curr_resync == MD_RESYNC_YIELDED)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
@@ -8782,7 +8797,8 @@ void md_do_sync(struct md_thread *thread)
desc, mdname(mddev),
mdname(mddev2));
}
- mddev_put(mddev2);
+ spin_unlock(&all_mddevs_lock);
+
if (signal_pending(current))
flush_signals(current);
schedule();
@@ -8792,7 +8808,8 @@ void md_do_sync(struct md_thread *thread)
finish_wait(&resync_wait, &wq);
}
}
- } while (mddev->curr_resync < 2);
+ spin_unlock(&all_mddevs_lock);
+ } while (mddev->curr_resync < MD_RESYNC_DELAYED);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -8876,7 +8893,7 @@ void md_do_sync(struct md_thread *thread)
desc, mdname(mddev));
mddev->curr_resync = j;
} else
- mddev->curr_resync = 3; /* no longer delayed */
+ mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
md_new_event();
@@ -9011,14 +9028,14 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 3) {
+ mddev->curr_resync >= MD_RESYNC_ACTIVE) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 3) {
+ mddev->curr_resync >= MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
@@ -9082,7 +9099,7 @@ void md_do_sync(struct md_thread *thread)
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
- mddev->curr_resync = 0;
+ mddev->curr_resync = MD_RESYNC_NONE;
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
@@ -9303,6 +9320,7 @@ void md_check_recovery(struct mddev *mddev)
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -9338,6 +9356,7 @@ void md_check_recovery(struct mddev *mddev)
goto unlock;
}
if (mddev->sync_thread) {
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
goto unlock;
}
@@ -9417,8 +9436,7 @@ void md_reap_sync_thread(struct mddev *mddev)
sector_t old_dev_sectors = mddev->dev_sectors;
bool is_reshaped = false;
- /* resync has finished, collect result */
- md_unregister_thread(&mddev->sync_thread);
+ /* sync_thread should be unregistered, collect result */
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev->degraded != mddev->raid_disks) {
@@ -9466,6 +9484,7 @@ void md_reap_sync_thread(struct mddev *mddev)
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event();
if (mddev->event_work.func)
@@ -9544,11 +9563,14 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
- struct list_head *tmp;
- struct mddev *mddev;
+ struct mddev *mddev, *n;
int need_delay = 0;
- for_each_mddev(mddev, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
@@ -9557,7 +9579,11 @@ static int md_notify_reboot(struct notifier_block *this,
mddev_unlock(mddev);
}
need_delay = 1;
+ mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
}
+ spin_unlock(&all_mddevs_lock);
+
/*
* certain more exotic SCSI devices are known to be
* volatile wrt too early system reboots. While the
@@ -9876,8 +9902,7 @@ void md_autostart_arrays(int part)
static __exit void md_exit(void)
{
- struct mddev *mddev;
- struct list_head *tmp;
+ struct mddev *mddev, *n;
int delay = 1;
unregister_blkdev(MD_MAJOR,"md");
@@ -9897,17 +9922,24 @@ static __exit void md_exit(void)
}
remove_proc_entry("mdstat", NULL);
- for_each_mddev(mddev, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
export_array(mddev);
mddev->ctime = 0;
mddev->hold_active = 0;
/*
- * for_each_mddev() will call mddev_put() at the end of each
- * iteration. As the mddev is now fully clear, this will
- * schedule the mddev for destruction by a workqueue, and the
+ * As the mddev is now fully clear, mddev_put will schedule
+ * the mddev for destruction by a workqueue, and the
* destroy_workqueue() below will wait for that to complete.
*/
+ mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
}
+ spin_unlock(&all_mddevs_lock);
+
destroy_workqueue(md_rdev_misc_wq);
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
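With for_each_mddev() gone, every walker converted above (md_notify_reboot, md_exit, md_do_sync, md_seq_*) follows the same open-coded shape: iterate under all_mddevs_lock, skip entries whose mddev_get() fails because MD_DELETED is set, drop the lock to do sleeping work with the reference held, then retake it. The recurring pattern, extracted:

	struct mddev *mddev, *n;

	spin_lock(&all_mddevs_lock);
	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
		if (!mddev_get(mddev))	/* skips MD_DELETED entries */
			continue;
		spin_unlock(&all_mddevs_lock);

		/* ...work that may sleep, reference held... */

		mddev_put(mddev);
		spin_lock(&all_mddevs_lock);
	}
	spin_unlock(&all_mddevs_lock);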
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b4f84b27bdef..b4e2d8b87b61 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -254,6 +254,7 @@ struct md_cluster_info;
* @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that
* array is ready yet.
* @MD_BROKEN: This is used to stop writes and mark array as failed.
+ * @MD_DELETED: This device is being deleted
*
* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
*/
@@ -270,6 +271,7 @@ enum mddev_flags {
MD_UPDATING_SB,
MD_NOT_READY,
MD_BROKEN,
+ MD_DELETED,
};
enum mddev_sb_flags {
@@ -288,6 +290,21 @@ struct serial_info {
sector_t _subtree_last; /* highest sector in subtree of rb node */
};
+/*
+ * mddev->curr_resync stores the current sector of the resync but
+ * also has some overloaded values.
+ */
+enum {
+ /* No resync in progress */
+ MD_RESYNC_NONE = 0,
+ /* Yielded to allow another conflicting resync to commence */
+ MD_RESYNC_YIELDED = 1,
+ /* Delayed to check that there is no conflict with another sync */
+ MD_RESYNC_DELAYED = 2,
+ /* Any value greater than or equal to this is in an active resync */
+ MD_RESYNC_ACTIVE = 3,
+};
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -750,6 +767,8 @@ extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
+struct mddev *md_alloc(dev_t dev, char *name);
+void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
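With the MD_RESYNC_* enum in place, curr_resync reads as a small state machine: values below MD_RESYNC_ACTIVE are states, values at or above it are sector positions. A minimal sketch of how a consumer distinguishes the two (hypothetical helper names):

	static bool example_resync_is_active(struct mddev *mddev)
	{
		return mddev->curr_resync >= MD_RESYNC_ACTIVE;
	}

	static sector_t example_resync_sector(struct mddev *mddev)
	{
		/* NONE, YIELDED and DELAYED carry no position */
		if (mddev->curr_resync < MD_RESYNC_ACTIVE)
			return 0;
		return mddev->curr_resync;
	}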
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 54c089a50b15..11935864f50f 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -391,7 +391,8 @@ struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
sizeof(struct buffer_aux),
dm_block_manager_alloc_callback,
- dm_block_manager_write_callback);
+ dm_block_manager_write_callback,
+ 0);
if (IS_ERR(bm->bufio)) {
r = PTR_ERR(bm->bufio);
kfree(bm);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 26545950ca42..64d6e4cd8a3a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2167,9 +2167,12 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = 0;
int number = rdev->raid_disk;
struct md_rdev **rdevp;
- struct raid10_info *p = conf->mirrors + number;
+ struct raid10_info *p;
print_conf(conf);
+ if (unlikely(number >= mddev->raid_disks))
+ return 0;
+ p = conf->mirrors + number;
if (rdev == p->rdev)
rdevp = &p->rdev;
else if (rdev == p->replacement)
@@ -2636,18 +2639,18 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, enum req_op op)
{
sector_t first_bad;
int bad_sectors;
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
- && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
/* success */
return 1;
- if (rw == WRITE) {
+ if (op == REQ_OP_WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
@@ -2777,7 +2780,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage, WRITE)
+ s, conf->tmppage, REQ_OP_WRITE)
== 0) {
/* Well, this device is dead */
pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
@@ -2811,8 +2814,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage,
- READ)) {
+ s, conf->tmppage, REQ_OP_READ)) {
case 0:
/* Well, this device is dead */
pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 6f2dd73128b0..f4e1cc1ece43 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1590,18 +1590,13 @@ void r5l_quiesce(struct r5l_log *log, int quiesce)
bool r5l_log_disk_error(struct r5conf *conf)
{
- struct r5l_log *log;
- bool ret;
- /* don't allow write if journal disk is missing */
- rcu_read_lock();
- log = rcu_dereference(conf->log);
+ struct r5l_log *log = conf->log;
+ /* don't allow write if journal disk is missing */
if (!log)
- ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+ return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
else
- ret = test_bit(Faulty, &log->rdev->flags);
- rcu_read_unlock();
- return ret;
+ return test_bit(Faulty, &log->rdev->flags);
}
#define R5L_RECOVERY_PAGE_POOL_SIZE 256
@@ -2534,12 +2529,13 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
struct r5conf *conf;
int ret;
- spin_lock(&mddev->lock);
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
conf = mddev->private;
- if (!conf || !conf->log) {
- spin_unlock(&mddev->lock);
- return 0;
- }
+ if (!conf || !conf->log)
+ goto out_unlock;
switch (conf->log->r5c_journal_mode) {
case R5C_JOURNAL_MODE_WRITE_THROUGH:
@@ -2557,7 +2553,9 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
default:
ret = 0;
}
- spin_unlock(&mddev->lock);
+
+out_unlock:
+ mddev_unlock(mddev);
return ret;
}
@@ -2639,7 +2637,7 @@ int r5c_try_caching_write(struct r5conf *conf,
int i;
struct r5dev *dev;
int to_cache = 0;
- void **pslot;
+ void __rcu **pslot;
sector_t tree_index;
int ret;
uintptr_t refcount;
@@ -2806,7 +2804,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
int i;
int do_wakeup = 0;
sector_t tree_index;
- void **pslot;
+ void __rcu **pslot;
uintptr_t refcount;
if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
@@ -3145,7 +3143,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->stripe_in_journal_lock);
atomic_set(&log->stripe_in_journal_count, 0);
- rcu_assign_pointer(conf->log, log);
+ conf->log = log;
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
@@ -3167,13 +3165,13 @@ void r5l_exit_log(struct r5conf *conf)
{
struct r5l_log *log = conf->log;
- conf->log = NULL;
- synchronize_rcu();
-
/* Ensure disable_writeback_work wakes up and exits */
wake_up(&conf->mddev->sb_wait);
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
+
+ conf->log = NULL;
+
mempool_exit(&log->meta_pool);
bioset_exit(&log->bs);
mempool_exit(&log->io_pool);
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 43c714a8798c..c8332502669e 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -2,49 +2,46 @@
#ifndef _RAID5_LOG_H
#define _RAID5_LOG_H
-extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
-extern void r5l_exit_log(struct r5conf *conf);
-extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
-extern void r5l_write_stripe_run(struct r5l_log *log);
-extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
-extern void r5l_stripe_write_finished(struct stripe_head *sh);
-extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
-extern void r5l_quiesce(struct r5l_log *log, int quiesce);
-extern bool r5l_log_disk_error(struct r5conf *conf);
-extern bool r5c_is_writeback(struct r5l_log *log);
-extern int
-r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
- struct stripe_head_state *s, int disks);
-extern void
-r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
- struct stripe_head_state *s);
-extern void r5c_release_extra_page(struct stripe_head *sh);
-extern void r5c_use_extra_page(struct stripe_head *sh);
-extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
-extern void r5c_handle_cached_data_endio(struct r5conf *conf,
- struct stripe_head *sh, int disks);
-extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh);
-extern void r5c_make_stripe_write_out(struct stripe_head *sh);
-extern void r5c_flush_cache(struct r5conf *conf, int num);
-extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
-extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
+void r5l_exit_log(struct r5conf *conf);
+int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
+void r5l_write_stripe_run(struct r5l_log *log);
+void r5l_flush_stripe_to_raid(struct r5l_log *log);
+void r5l_stripe_write_finished(struct stripe_head *sh);
+int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
+void r5l_quiesce(struct r5l_log *log, int quiesce);
+bool r5l_log_disk_error(struct r5conf *conf);
+bool r5c_is_writeback(struct r5l_log *log);
+int r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks);
+void r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s);
+void r5c_release_extra_page(struct stripe_head *sh);
+void r5c_use_extra_page(struct stripe_head *sh);
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks);
+int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh);
+void r5c_make_stripe_write_out(struct stripe_head *sh);
+void r5c_flush_cache(struct r5conf *conf, int num);
+void r5c_check_stripe_cache_usage(struct r5conf *conf);
+void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
-extern void r5c_update_on_rdev_error(struct mddev *mddev,
- struct md_rdev *rdev);
-extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
-extern int r5l_start(struct r5l_log *log);
+void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev);
+bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+int r5l_start(struct r5l_log *log);
-extern struct dma_async_tx_descriptor *
+struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx);
-extern int ppl_init_log(struct r5conf *conf);
-extern void ppl_exit_log(struct r5conf *conf);
-extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
-extern void ppl_write_stripe_run(struct r5conf *conf);
-extern void ppl_stripe_write_finished(struct stripe_head *sh);
-extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
-extern void ppl_quiesce(struct r5conf *conf, int quiesce);
-extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
+int ppl_init_log(struct r5conf *conf);
+void ppl_exit_log(struct r5conf *conf);
+int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
+void ppl_write_stripe_run(struct r5conf *conf);
+void ppl_stripe_write_finished(struct stripe_head *sh);
+int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+void ppl_quiesce(struct r5conf *conf, int quiesce);
+int ppl_handle_flush_request(struct bio *bio);
extern struct md_sysfs_entry ppl_write_hint;
static inline bool raid5_has_log(struct r5conf *conf)
@@ -111,7 +108,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
if (conf->log)
ret = r5l_handle_flush_request(conf->log, bio);
else if (raid5_has_ppl(conf))
- ret = ppl_handle_flush_request(conf->log, bio);
+ ret = ppl_handle_flush_request(bio);
return ret;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 98988cb26295..31b9157bc9ae 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -679,7 +679,7 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)
}
}
-int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
+int ppl_handle_flush_request(struct bio *bio)
{
if (bio->bi_iter.bi_size == 0) {
bio_endio(bio);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5cabdbbac48b..31a0cbf63384 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -61,6 +61,8 @@
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
+#define RAID5_MAX_REQ_STRIPES 256
+
static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
@@ -624,6 +626,49 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
return NULL;
}
+static struct stripe_head *find_get_stripe(struct r5conf *conf,
+ sector_t sector, short generation, int hash)
+{
+ int inc_empty_inactive_list_flag;
+ struct stripe_head *sh;
+
+ sh = __find_stripe(conf, sector, generation);
+ if (!sh)
+ return NULL;
+
+ if (atomic_inc_not_zero(&sh->count))
+ return sh;
+
+ /*
+ * Slow path. The reference count is zero which means the stripe must
+ * be on a list (sh->lru). Must remove the stripe from the list that
+ * references it with the device_lock held.
+ */
+
+ spin_lock(&conf->device_lock);
+ if (!atomic_read(&sh->count)) {
+ if (!test_bit(STRIPE_HANDLE, &sh->state))
+ atomic_inc(&conf->active_stripes);
+ BUG_ON(list_empty(&sh->lru) &&
+ !test_bit(STRIPE_EXPANDING, &sh->state));
+ inc_empty_inactive_list_flag = 0;
+ if (!list_empty(conf->inactive_list + hash))
+ inc_empty_inactive_list_flag = 1;
+ list_del_init(&sh->lru);
+ if (list_empty(conf->inactive_list + hash) &&
+ inc_empty_inactive_list_flag)
+ atomic_inc(&conf->empty_inactive_list_nr);
+ if (sh->group) {
+ sh->group->stripes_cnt--;
+ sh->group = NULL;
+ }
+ }
+ atomic_inc(&sh->count);
+ spin_unlock(&conf->device_lock);
+
+ return sh;
+}
+
/*
* Need to check if array has failed when deciding whether to:
* - start an array
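find_get_stripe() above factors out a common refcount idiom: try atomic_inc_not_zero() first, and only on failure take the lock to pull the object off its inactive list and revive the count. The idiom in isolation, with generic names:

	struct cached_obj {
		atomic_t count;
		struct list_head lru;
	};

	static struct cached_obj *get_or_revive(struct cached_obj *obj,
						spinlock_t *lock)
	{
		if (!obj)
			return NULL;
		if (atomic_inc_not_zero(&obj->count))
			return obj;	/* fast path: already referenced */

		/* slow path: count is zero, so the object sits on an LRU */
		spin_lock(lock);
		if (!atomic_read(&obj->count))
			list_del_init(&obj->lru);
		atomic_inc(&obj->count);
		spin_unlock(lock);
		return obj;
	}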
@@ -710,80 +755,121 @@ static bool has_failed(struct r5conf *conf)
return degraded > conf->max_degraded;
}
-struct stripe_head *
-raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce)
+enum stripe_result {
+ STRIPE_SUCCESS = 0,
+ STRIPE_RETRY,
+ STRIPE_SCHEDULE_AND_RETRY,
+ STRIPE_FAIL,
+};
+
+struct stripe_request_ctx {
+ /* a reference to the last stripe_head for batching */
+ struct stripe_head *batch_last;
+
+ /* first sector in the request */
+ sector_t first_sector;
+
+ /* last sector in the request */
+ sector_t last_sector;
+
+ /*
+ * bitmap to track stripe sectors that have been added to stripes
+ * add one to account for unaligned requests
+ */
+ DECLARE_BITMAP(sectors_to_do, RAID5_MAX_REQ_STRIPES + 1);
+
+ /* the request had REQ_PREFLUSH, cleared after the first stripe_head */
+ bool do_flush;
+};
+
+/*
+ * Block until another thread clears R5_INACTIVE_BLOCKED or
+ * there are fewer than 3/4 the maximum number of active stripes
+ * and there is an inactive stripe available.
+ */
+static bool is_inactive_blocked(struct r5conf *conf, int hash)
+{
+ int active = atomic_read(&conf->active_stripes);
+
+ if (list_empty(conf->inactive_list + hash))
+ return false;
+
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+ return true;
+
+ return active < (conf->max_nr_stripes * 3 / 4);
+}
+
+static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, sector_t sector,
+ bool previous, bool noblock, bool noquiesce)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(conf, sector);
- int inc_empty_inactive_list_flag;
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(conf->hash_locks + hash);
- do {
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0 || noquiesce,
+retry:
+ if (!noquiesce && conf->quiesce) {
+ /*
+ * Must release the reference to batch_last before waiting
+ * on quiesce, otherwise the batch_last will hold a reference
+ * to a stripe and raid5_quiesce() will deadlock waiting for
+ * active_stripes to go to zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
+
+ wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
*(conf->hash_locks + hash));
- sh = __find_stripe(conf, sector, conf->generation - previous);
- if (!sh) {
- if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
- sh = get_free_stripe(conf, hash);
- if (!sh && !test_bit(R5_DID_ALLOC,
- &conf->cache_state))
- set_bit(R5_ALLOC_MORE,
- &conf->cache_state);
- }
- if (noblock && sh == NULL)
- break;
+ }
- r5c_check_stripe_cache_usage(conf);
- if (!sh) {
- set_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(
- conf->wait_for_stripe,
- !list_empty(conf->inactive_list + hash) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes * 3 / 4)
- || !test_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state)),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state);
- } else {
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- }
- } else if (!atomic_inc_not_zero(&sh->count)) {
- spin_lock(&conf->device_lock);
- if (!atomic_read(&sh->count)) {
- if (!test_bit(STRIPE_HANDLE, &sh->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&sh->lru) &&
- !test_bit(STRIPE_EXPANDING, &sh->state));
- inc_empty_inactive_list_flag = 0;
- if (!list_empty(conf->inactive_list + hash))
- inc_empty_inactive_list_flag = 1;
- list_del_init(&sh->lru);
- if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
- atomic_inc(&conf->empty_inactive_list_nr);
- if (sh->group) {
- sh->group->stripes_cnt--;
- sh->group = NULL;
- }
- }
- atomic_inc(&sh->count);
- spin_unlock(&conf->device_lock);
- }
- } while (sh == NULL);
+ sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
+ if (sh)
+ goto out;
+
+ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+ goto wait_for_stripe;
+
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ goto out;
+ }
+
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+
+wait_for_stripe:
+ if (noblock)
+ goto out;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ goto retry;
+
+out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ sector_t sector, bool previous, bool noblock, bool noquiesce)
+{
+ return __raid5_get_active_stripe(conf, NULL, sector, previous, noblock,
+ noquiesce);
+}
+
static bool is_full_stripe_write(struct stripe_head *sh)
{
BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
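The context's batch_last field deserves a note: it pins the previous stripe_head so consecutive stripes can batch without a hash-lock lookup, but any path that sleeps waiting for the array to drain must drop it first, or raid5_quiesce() waits forever for active_stripes to reach zero. The shape of that rule, as used in the hunk above:

	/* drop pinned resources before sleeping on a drain condition */
	if (ctx && ctx->batch_last) {
		raid5_release_stripe(ctx->batch_last);
		ctx->batch_last = NULL;
	}
	wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
			    *(conf->hash_locks + hash));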
@@ -824,13 +910,13 @@ static bool stripe_can_batch(struct stripe_head *sh)
}
/* we only do back search */
-static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
+static void stripe_add_to_batch_list(struct r5conf *conf,
+ struct stripe_head *sh, struct stripe_head *last_sh)
{
struct stripe_head *head;
sector_t head_sector, tmp_sec;
int hash;
int dd_idx;
- int inc_empty_inactive_list_flag;
/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
tmp_sec = sh->sector;
@@ -838,36 +924,20 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
return;
head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf);
- hash = stripe_hash_locks_hash(conf, head_sector);
- spin_lock_irq(conf->hash_locks + hash);
- head = __find_stripe(conf, head_sector, conf->generation);
- if (head && !atomic_inc_not_zero(&head->count)) {
- spin_lock(&conf->device_lock);
- if (!atomic_read(&head->count)) {
- if (!test_bit(STRIPE_HANDLE, &head->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&head->lru) &&
- !test_bit(STRIPE_EXPANDING, &head->state));
- inc_empty_inactive_list_flag = 0;
- if (!list_empty(conf->inactive_list + hash))
- inc_empty_inactive_list_flag = 1;
- list_del_init(&head->lru);
- if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
- atomic_inc(&conf->empty_inactive_list_nr);
- if (head->group) {
- head->group->stripes_cnt--;
- head->group = NULL;
- }
- }
+ if (last_sh && head_sector == last_sh->sector) {
+ head = last_sh;
atomic_inc(&head->count);
- spin_unlock(&conf->device_lock);
+ } else {
+ hash = stripe_hash_locks_hash(conf, head_sector);
+ spin_lock_irq(conf->hash_locks + hash);
+ head = find_get_stripe(conf, head_sector, conf->generation,
+ hash);
+ spin_unlock_irq(conf->hash_locks + hash);
+ if (!head)
+ return;
+ if (!stripe_can_batch(head))
+ goto out;
}
- spin_unlock_irq(conf->hash_locks + hash);
-
- if (!head)
- return;
- if (!stripe_can_batch(head))
- goto out;
lock_two_stripes(head, sh);
/* clear_batch_ready clear the flag */
@@ -2882,10 +2952,10 @@ static void raid5_end_write_request(struct bio *bi)
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- raid5_release_stripe(sh);
if (sh->batch_head && sh != sh->batch_head)
raid5_release_stripe(sh->batch_head);
+ raid5_release_stripe(sh);
}
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
@@ -3413,39 +3483,32 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
s->locked, s->ops_request);
}
-/*
- * Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain.
- * The bi_next chain must be in order.
- */
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
- int forwrite, int previous)
+static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite)
{
- struct bio **bip;
struct r5conf *conf = sh->raid_conf;
- int firstwrite=0;
+ struct bio **bip;
- pr_debug("adding bi b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_iter.bi_sector,
- (unsigned long long)sh->sector);
+ pr_debug("checking bi b#%llu to stripe s#%llu\n",
+ bi->bi_iter.bi_sector, sh->sector);
- spin_lock_irq(&sh->stripe_lock);
/* Don't allow new IO added to stripes in batch list */
if (sh->batch_head)
- goto overlap;
- if (forwrite) {
+ return true;
+
+ if (forwrite)
bip = &sh->dev[dd_idx].towrite;
- if (*bip == NULL)
- firstwrite = 1;
- } else
+ else
bip = &sh->dev[dd_idx].toread;
+
while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
- goto overlap;
- bip = & (*bip)->bi_next;
+ return true;
+ bip = &(*bip)->bi_next;
}
+
if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
- goto overlap;
+ return true;
if (forwrite && raid5_has_ppl(conf)) {
/*
@@ -3474,9 +3537,30 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
}
if (first + conf->chunk_sectors * (count - 1) != last)
- goto overlap;
+ return true;
+ }
+
+ return false;
+}
+
+static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite, int previous)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct bio **bip;
+ int firstwrite = 0;
+
+ if (forwrite) {
+ bip = &sh->dev[dd_idx].towrite;
+ if (!*bip)
+ firstwrite = 1;
+ } else {
+ bip = &sh->dev[dd_idx].toread;
}
+ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector)
+ bip = &(*bip)->bi_next;
+
if (!forwrite || previous)
clear_bit(STRIPE_BATCH_READY, &sh->state);
@@ -3502,9 +3586,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
sh->overwrite_disks++;
}
- pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)(*bip)->bi_iter.bi_sector,
- (unsigned long long)sh->sector, dd_idx);
+ pr_debug("added bi b#%llu to stripe s#%llu, disk %d, logical %llu\n",
+ (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
+ sh->dev[dd_idx].sector);
if (conf->mddev->bitmap && firstwrite) {
/* Cannot hold spinlock over bitmap_startwrite,
@@ -3512,7 +3596,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
* we have added to the bitmap and set bm_seq.
* So set STRIPE_BITMAP_PENDING to prevent
* batching.
- * If multiple add_stripe_bio() calls race here they
+ * If multiple __add_stripe_bio() calls race here they
* much all set STRIPE_BITMAP_PENDING. So only the first one
* to complete "bitmap_startwrite" gets to set
* STRIPE_BIT_DELAY. This is important as once a stripe
@@ -3530,16 +3614,27 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
}
- spin_unlock_irq(&sh->stripe_lock);
+}
- if (stripe_can_batch(sh))
- stripe_add_to_batch_list(conf, sh);
- return 1;
+/*
+ * Each stripe/dev can have one or more bios attached.
+ * toread/towrite point to the first in a chain.
+ * The bi_next chain must be in order.
+ */
+static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite, int previous)
+{
+ spin_lock_irq(&sh->stripe_lock);
+
+ if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
+ set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+ spin_unlock_irq(&sh->stripe_lock);
+ return false;
+ }
- overlap:
- set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+ __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
spin_unlock_irq(&sh->stripe_lock);
- return 0;
+ return true;
}
static void end_reshape(struct r5conf *conf);
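The add_stripe_bio() split above separates a side-effect-free overlap check (stripe_bio_overlaps()) from the commit step (__add_stripe_bio()), which is what lets add_all_stripe_bios() validate every data disk before mutating any of them. A distilled check-then-commit sketch using those two helpers (read path only; the parity-disk and sector-range filtering is omitted):

	static bool example_try_add_all(struct stripe_head *sh, struct bio *bi,
					int ndisks)
	{
		int i;

		spin_lock_irq(&sh->stripe_lock);

		/* pass 1: read-only checks, no side effects yet */
		for (i = 0; i < ndisks; i++) {
			if (stripe_bio_overlaps(sh, bi, i, 0)) {
				spin_unlock_irq(&sh->stripe_lock);
				return false;
			}
		}

		/* pass 2: commit, now that the whole request is known to fit */
		for (i = 0; i < ndisks; i++)
			__add_stripe_bio(sh, bi, i, 0, 0);

		spin_unlock_irq(&sh->stripe_lock);
		return true;
	}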
@@ -5785,17 +5880,215 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
bio_endio(bi);
}
-static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+static bool ahead_of_reshape(struct mddev *mddev, sector_t sector,
+ sector_t reshape_sector)
{
- struct r5conf *conf = mddev->private;
+ return mddev->reshape_backwards ? sector < reshape_sector :
+ sector >= reshape_sector;
+}
+
+static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min,
+ sector_t max, sector_t reshape_sector)
+{
+ return mddev->reshape_backwards ? max < reshape_sector :
+ min >= reshape_sector;
+}
+
+static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
+ struct stripe_head *sh)
+{
+ sector_t max_sector = 0, min_sector = MaxSector;
+ bool ret = false;
int dd_idx;
- sector_t new_sector;
- sector_t logical_sector, last_sector;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ if (dd_idx == sh->pd_idx)
+ continue;
+
+ min_sector = min(min_sector, sh->dev[dd_idx].sector);
+ max_sector = max(max_sector, sh->dev[dd_idx].sector);
+ }
+
+ spin_lock_irq(&conf->device_lock);
+
+ if (!range_ahead_of_reshape(mddev, min_sector, max_sector,
+ conf->reshape_progress))
+ /* mismatch, need to try again */
+ ret = true;
+
+ spin_unlock_irq(&conf->device_lock);
+
+ return ret;
+}
+
+static int add_all_stripe_bios(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, struct stripe_head *sh,
+ struct bio *bi, int forwrite, int previous)
+{
+ int dd_idx;
+ int ret = 1;
+
+ spin_lock_irq(&sh->stripe_lock);
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ struct r5dev *dev = &sh->dev[dd_idx];
+
+ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ if (dev->sector < ctx->first_sector ||
+ dev->sector >= ctx->last_sector)
+ continue;
+
+ if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
+ set_bit(R5_Overlap, &dev->flags);
+ ret = 0;
+ continue;
+ }
+ }
+
+ if (!ret)
+ goto out;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ struct r5dev *dev = &sh->dev[dd_idx];
+
+ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ if (dev->sector < ctx->first_sector ||
+ dev->sector >= ctx->last_sector)
+ continue;
+
+ __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
+ clear_bit((dev->sector - ctx->first_sector) >>
+ RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do);
+ }
+
+out:
+ spin_unlock_irq(&sh->stripe_lock);
+ return ret;
+}
+
+static enum stripe_result make_stripe_request(struct mddev *mddev,
+ struct r5conf *conf, struct stripe_request_ctx *ctx,
+ sector_t logical_sector, struct bio *bi)
+{
+ const int rw = bio_data_dir(bi);
+ enum stripe_result ret;
struct stripe_head *sh;
+ sector_t new_sector;
+ int previous = 0;
+ int seq, dd_idx;
+
+ seq = read_seqcount_begin(&conf->gen_lock);
+
+ if (unlikely(conf->reshape_progress != MaxSector)) {
+ /*
+ * Spinlock is needed as reshape_progress may be
+ * 64bit on a 32bit platform, and so it might be
+ * possible to see a half-updated value
+ * Of course reshape_progress could change after
+ * the lock is dropped, so once we get a reference
+ * to the stripe that we think it is, we will have
+ * to check again.
+ */
+ spin_lock_irq(&conf->device_lock);
+ if (ahead_of_reshape(mddev, logical_sector,
+ conf->reshape_progress)) {
+ previous = 1;
+ } else {
+ if (ahead_of_reshape(mddev, logical_sector,
+ conf->reshape_safe)) {
+ spin_unlock_irq(&conf->device_lock);
+ return STRIPE_SCHEDULE_AND_RETRY;
+ }
+ }
+ spin_unlock_irq(&conf->device_lock);
+ }
+
+ new_sector = raid5_compute_sector(conf, logical_sector, previous,
+ &dd_idx, NULL);
+ pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
+ new_sector, logical_sector);
+
+ sh = __raid5_get_active_stripe(conf, ctx, new_sector, previous,
+ (bi->bi_opf & REQ_RAHEAD), 0);
+ if (unlikely(!sh)) {
+ /* cannot get stripe, just give up */
+ bi->bi_status = BLK_STS_IOERR;
+ return STRIPE_FAIL;
+ }
+
+ if (unlikely(previous) &&
+ stripe_ahead_of_reshape(mddev, conf, sh)) {
+ /*
+ * Expansion moved on while waiting for a stripe.
+ * Expansion could still move past after this
+ * test, but as we are holding a reference to
+ * 'sh', we know that if that happens,
+ * STRIPE_EXPANDING will get set and the expansion
+ * won't proceed until we finish with the stripe.
+ */
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out_release;
+ }
+
+ if (read_seqcount_retry(&conf->gen_lock, seq)) {
+ /* Might have got the wrong stripe_head by accident */
+ ret = STRIPE_RETRY;
+ goto out_release;
+ }
+
+ if (test_bit(STRIPE_EXPANDING, &sh->state) ||
+ !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
+ /*
+ * Stripe is busy expanding or add failed due to
+ * overlap. Flush everything and wait a while.
+ */
+ md_wakeup_thread(mddev->thread);
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out_release;
+ }
+
+ if (stripe_can_batch(sh)) {
+ stripe_add_to_batch_list(conf, sh, ctx->batch_last);
+ if (ctx->batch_last)
+ raid5_release_stripe(ctx->batch_last);
+ atomic_inc(&sh->count);
+ ctx->batch_last = sh;
+ }
+
+ if (ctx->do_flush) {
+ set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+ /* we only need flush for one stripe */
+ ctx->do_flush = false;
+ }
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_opf & REQ_SYNC) &&
+ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
+
+ release_stripe_plug(mddev, sh);
+ return STRIPE_SUCCESS;
+
+out_release:
+ raid5_release_stripe(sh);
+ return ret;
+}
+
+static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct r5conf *conf = mddev->private;
+ sector_t logical_sector;
+ struct stripe_request_ctx ctx = {};
const int rw = bio_data_dir(bi);
- DEFINE_WAIT(w);
- bool do_prepare;
- bool do_flush = false;
+ enum stripe_result res;
+ int s, stripe_cnt;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = log_handle_flush_request(conf, bi);
@@ -5811,7 +6104,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
* if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
* we need to flush journal device
*/
- do_flush = bi->bi_opf & REQ_PREFLUSH;
+ ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
}
if (!md_write_start(mddev, bi))
@@ -5835,134 +6128,68 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
}
logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
- last_sector = bio_end_sector(bi);
+ ctx.first_sector = logical_sector;
+ ctx.last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
+ stripe_cnt = DIV_ROUND_UP_SECTOR_T(ctx.last_sector - logical_sector,
+ RAID5_STRIPE_SECTORS(conf));
+ bitmap_set(ctx.sectors_to_do, 0, stripe_cnt);
+
+ pr_debug("raid456: %s, logical %llu to %llu\n", __func__,
+ bi->bi_iter.bi_sector, ctx.last_sector);
+
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
if ((bi->bi_opf & REQ_NOWAIT) &&
(conf->reshape_progress != MaxSector) &&
- (mddev->reshape_backwards
- ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe)
- : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) {
+ !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) &&
+ ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) {
bio_wouldblock_error(bi);
if (rw == WRITE)
md_write_end(mddev);
return true;
}
md_account_bio(mddev, &bi);
- prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
- for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
- int previous;
- int seq;
-
- do_prepare = false;
- retry:
- seq = read_seqcount_begin(&conf->gen_lock);
- previous = 0;
- if (do_prepare)
- prepare_to_wait(&conf->wait_for_overlap, &w,
- TASK_UNINTERRUPTIBLE);
- if (unlikely(conf->reshape_progress != MaxSector)) {
- /* spinlock is needed as reshape_progress may be
- * 64bit on a 32bit platform, and so it might be
- * possible to see a half-updated value
- * Of course reshape_progress could change after
- * the lock is dropped, so once we get a reference
- * to the stripe that we think it is, we will have
- * to check again.
- */
- spin_lock_irq(&conf->device_lock);
- if (mddev->reshape_backwards
- ? logical_sector < conf->reshape_progress
- : logical_sector >= conf->reshape_progress) {
- previous = 1;
- } else {
- if (mddev->reshape_backwards
- ? logical_sector < conf->reshape_safe
- : logical_sector >= conf->reshape_safe) {
- spin_unlock_irq(&conf->device_lock);
- schedule();
- do_prepare = true;
- goto retry;
- }
- }
- spin_unlock_irq(&conf->device_lock);
- }
- new_sector = raid5_compute_sector(conf, logical_sector,
- previous,
- &dd_idx, NULL);
- pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
- (unsigned long long)new_sector,
- (unsigned long long)logical_sector);
+ add_wait_queue(&conf->wait_for_overlap, &wait);
+ while (1) {
+ res = make_stripe_request(mddev, conf, &ctx, logical_sector,
+ bi);
+ if (res == STRIPE_FAIL)
+ break;
- sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_opf & REQ_RAHEAD), 0);
- if (sh) {
- if (unlikely(previous)) {
- /* expansion might have moved on while waiting for a
- * stripe, so we must do the range check again.
- * Expansion could still move past after this
- * test, but as we are holding a reference to
- * 'sh', we know that if that happens,
- * STRIPE_EXPANDING will get set and the expansion
- * won't proceed until we finish with the stripe.
- */
- int must_retry = 0;
- spin_lock_irq(&conf->device_lock);
- if (mddev->reshape_backwards
- ? logical_sector >= conf->reshape_progress
- : logical_sector < conf->reshape_progress)
- /* mismatch, need to try again */
- must_retry = 1;
- spin_unlock_irq(&conf->device_lock);
- if (must_retry) {
- raid5_release_stripe(sh);
- schedule();
- do_prepare = true;
- goto retry;
- }
- }
- if (read_seqcount_retry(&conf->gen_lock, seq)) {
- /* Might have got the wrong stripe_head
- * by accident
- */
- raid5_release_stripe(sh);
- goto retry;
- }
+ if (res == STRIPE_RETRY)
+ continue;
- if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
- /* Stripe is busy expanding or
- * add failed due to overlap. Flush everything
- * and wait a while
- */
- md_wakeup_thread(mddev->thread);
- raid5_release_stripe(sh);
- schedule();
- do_prepare = true;
- goto retry;
- }
- if (do_flush) {
- set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
- /* we only need flush for one stripe */
- do_flush = false;
+ if (res == STRIPE_SCHEDULE_AND_RETRY) {
+ /*
+ * Must release the reference to batch_last before
+ * scheduling and waiting for work to be done,
+ * otherwise the batch_last stripe head could prevent
+ * raid5_activate_delayed() from making progress
+ * and thus deadlocking.
+ */
+ if (ctx.batch_last) {
+ raid5_release_stripe(ctx.batch_last);
+ ctx.batch_last = NULL;
}
- set_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
- if ((!sh->batch_head || sh == sh->batch_head) &&
- (bi->bi_opf & REQ_SYNC) &&
- !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- atomic_inc(&conf->preread_active_stripes);
- release_stripe_plug(mddev, sh);
- } else {
- /* cannot get stripe for read-ahead, just give-up */
- bi->bi_status = BLK_STS_IOERR;
- break;
+ wait_woken(&wait, TASK_UNINTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
}
+
+ s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
+ if (s == stripe_cnt)
+ break;
+
+ logical_sector = ctx.first_sector +
+ (s << RAID5_STRIPE_SHIFT(conf));
}
- finish_wait(&conf->wait_for_overlap, &w);
+ remove_wait_queue(&conf->wait_for_overlap, &wait);
+
+ if (ctx.batch_last)
+ raid5_release_stripe(ctx.batch_last);
if (rw == WRITE)
md_write_end(mddev);
@@ -7417,7 +7644,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.count_objects = raid5_cache_count;
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
- ret = register_shrinker(&conf->shrinker);
+ ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev));
if (ret) {
pr_warn("md/raid:%s: couldn't register shrinker.\n",
mdname(mddev));
@@ -7815,7 +8042,15 @@ static int raid5_run(struct mddev *mddev)
mddev->queue->limits.discard_granularity < stripe)
blk_queue_max_discard_sectors(mddev->queue, 0);
- blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
+ /*
+ * Requests require having a bitmap for each stripe.
+ * Limit the max sectors based on this.
+ */
+ blk_queue_max_hw_sectors(mddev->queue,
+ RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf));
+
+ /* No restrictions on the number of segments in the request */
+ blk_queue_max_segments(mddev->queue, USHRT_MAX);
}
if (log_init(conf, journal_dev, raid5_has_ppl(conf)))
@@ -8066,8 +8301,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* find the disk ... but prefer rdev->saved_raid_disk
* if possible.
*/
- if (rdev->saved_raid_disk >= 0 &&
- rdev->saved_raid_disk >= first &&
+ if (rdev->saved_raid_disk >= first &&
rdev->saved_raid_disk <= last &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
first = rdev->saved_raid_disk;
@@ -8704,8 +8938,11 @@ static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
err = log_init(conf, NULL, true);
if (!err) {
err = resize_stripes(conf, conf->pool_size);
- if (err)
+ if (err) {
+ mddev_suspend(mddev);
log_exit(conf);
+ mddev_resume(mddev);
+ }
}
} else
err = -EINVAL;
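The rewritten loop above swaps the old prepare_to_wait()/schedule() retry cycle for the wait_woken() API, which keeps one wait entry on the queue instead of re-arming it on every pass. A minimal sketch of that pattern, assuming a hypothetical wait queue wq and done() predicate unrelated to md:

#include <linux/wait.h>
#include <linux/sched.h>

/* Minimal wait_woken() retry pattern; wq and done() are placeholders. */
static void wait_until_done(wait_queue_head_t *wq, bool (*done)(void))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!done()) {
		/* sleeps until a wake_up(wq) wakes this entry */
		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(wq, &wait);
}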
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 638d29863503..a5082bed83c8 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -812,7 +812,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce);
+ bool previous, bool noblock, bool noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 0de7acdf58a7..f66ac14cffad 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2517,7 +2517,6 @@ static struct snd_soc_component_driver tda1997x_codec_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int tda1997x_probe(struct i2c_client *client,
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index cb48c5ff3dee..c93d2906e4c7 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -875,7 +875,7 @@ static int vcodec_domains_get(struct venus_core *core)
}
skip_pmdomains:
- if (!core->has_opp_table)
+ if (!core->res->opp_pmdomain)
return 0;
/* Attach the power domain for setting performance state */
@@ -1007,6 +1007,10 @@ static int core_get_v4(struct venus_core *core)
if (ret)
return ret;
+ ret = vcodec_domains_get(core);
+ if (ret)
+ return ret;
+
if (core->res->opp_pmdomain) {
ret = devm_pm_opp_of_add_table(dev);
if (!ret) {
@@ -1017,10 +1021,6 @@ static int core_get_v4(struct venus_core *core)
}
}
- ret = vcodec_domains_get(core);
- if (ret)
- return ret;
-
return 0;
}
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 908f8d5392b2..85bc936c02f9 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1395,15 +1395,14 @@ err_msg:
static int tegra_emc_opp_table_init(struct tegra_emc *emc)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
- struct opp_table *hw_opp_table;
- int err;
+ int opp_token, err;
- hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
- err = PTR_ERR_OR_ZERO(hw_opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ if (err < 0) {
dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
return err;
}
+ opp_token = err;
err = dev_pm_opp_of_add_table(emc->dev);
if (err) {
@@ -1430,7 +1429,7 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
remove_table:
dev_pm_opp_of_remove_table(emc->dev);
put_hw_table:
- dev_pm_opp_put_supported_hw(hw_opp_table);
+ dev_pm_opp_put_supported_hw(opp_token);
return err;
}
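For context, the hunk above follows the OPP core change in which dev_pm_opp_set_supported_hw() hands back an integer token (negative errno on failure) instead of an opp_table pointer, and the put side takes that token. A hedged sketch of the new usage, with a made-up version word:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_opp_hw_init(struct device *dev)
{
	u32 hw_version = BIT(0);	/* illustrative value */
	int token;

	token = dev_pm_opp_set_supported_hw(dev, &hw_version, 1);
	if (token < 0)
		return token;		/* negative errno on failure */

	/* ... populate and use the OPP table ... */

	dev_pm_opp_put_supported_hw(token);	/* release by token */
	return 0;
}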
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index ed9a683b3ca8..ba8414519515 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1341,17 +1341,17 @@ static int msb_ftl_initialize(struct msb_data *msb)
msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
msb->logical_block_count = msb->zone_count * 496 - 2;
- msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
- msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
+ msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
msb->lba_to_pba_table =
kmalloc_array(msb->logical_block_count, sizeof(u16),
GFP_KERNEL);
if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
!msb->erased_blocks_bitmap) {
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
- kfree(msb->erased_blocks_bitmap);
return -ENOMEM;
}
@@ -1946,7 +1946,8 @@ static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
static void msb_data_clear(struct msb_data *msb)
{
kfree(msb->boot_page);
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
kfree(msb->cache);
msb->card = NULL;
@@ -2243,8 +2244,8 @@ static int msb_resume(struct memstick_dev *card)
goto out;
if (msb->block_count != new_msb->block_count ||
- memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
- msb->block_count / 8))
+ !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+ msb->block_count))
goto out;
card_dead = false;
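The ms_block conversion above replaces hand-sized kzalloc(nbits / 8) buffers, which silently round down when nbits is not a multiple of eight, with the bitmap API: allocation is sized in longs and comparison counts bits, not bytes. A small sketch under those assumptions:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_bitmaps(unsigned int nbits)
{
	unsigned long *a = bitmap_zalloc(nbits, GFP_KERNEL);
	unsigned long *b = bitmap_zalloc(nbits, GFP_KERNEL);
	int ret = -ENOMEM;

	if (!a || !b)
		goto out;

	set_bit(3, a);
	set_bit(3, b);
	/* compares exactly nbits bits, independent of allocation size */
	ret = bitmap_equal(a, b, nbits) ? 0 : -EINVAL;
out:
	bitmap_free(a);		/* NULL-safe, like kfree() */
	bitmap_free(b);
	return ret;
}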
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 388675cc1765..62089a8caa2f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -101,7 +101,7 @@ static u8 mptspiInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for interna
* @target: per target private data
* @sdev: SCSI device
*
- * Update the target negotiation parameters based on the the Inquiry
+ * Update the target negotiation parameters based on the Inquiry
* data, adapter capabilities, and NVRAM settings.
**/
static void
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3b59456f5545..abb58ab1a1a4 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -572,6 +572,7 @@ config LPC_ICH
tristate "Intel ICH LPC"
depends on PCI
select MFD_CORE
+ select P2SB if X86
help
The LPC bridge function of the Intel ICH provides support for
many functional units. This driver provides needed support for
@@ -1357,12 +1358,13 @@ config MFD_STA2X11
select REGMAP_MMIO
config MFD_SUN6I_PRCM
- bool "Allwinner A31 PRCM controller"
+ bool "Allwinner A31/A23/A33 PRCM controller"
depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
- in A31 SoC.
+ in the A31, A23, and A33 SoCs. Other Allwinner SoCs contain similar
+ hardware, but they do not use this driver.
config MFD_SYSCON
bool "System Controller Register R/W Based on Regmap"
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 56338f9dbd0b..4fb7e35eb5ed 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -596,12 +596,11 @@ static __init int asic3_gpio_probe(struct platform_device *pdev,
return gpiochip_add_data(&asic->gpio, asic);
}
-static int asic3_gpio_remove(struct platform_device *pdev)
+static void asic3_gpio_remove(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
gpiochip_remove(&asic->gpio);
- return 0;
}
static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
@@ -1030,7 +1029,6 @@ static int __init asic3_probe(struct platform_device *pdev)
static int asic3_remove(struct platform_device *pdev)
{
- int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
@@ -1038,9 +1036,8 @@ static int asic3_remove(struct platform_device *pdev)
asic3_mfd_remove(pdev);
- ret = asic3_gpio_remove(pdev);
- if (ret < 0)
- return ret;
+ asic3_gpio_remove(pdev);
+
asic3_irq_remove(pdev);
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 8161a5dc68e8..88a212a8168c 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -619,6 +619,9 @@ static const struct mfd_cell axp20x_cells[] = {
static const struct mfd_cell axp221_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -645,6 +648,9 @@ static const struct mfd_cell axp221_cells[] = {
static const struct mfd_cell axp223_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -785,6 +791,9 @@ static const struct mfd_cell axp806_cells[] = {
static const struct mfd_cell axp809_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp809_pek_resources),
.resources = axp809_pek_resources,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 596731caf407..344ad03bdc42 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -65,6 +65,11 @@ static const struct cros_feature_to_name cros_mcu_devices[] = {
.desc = "System Control Processor",
},
{
+ .id = EC_FEATURE_SCP_C1,
+ .name = CROS_EC_DEV_SCP_C1_NAME,
+ .desc = "System Control Processor 2nd Core",
+ },
+ {
.id = EC_FEATURE_TOUCHPAD,
.name = CROS_EC_DEV_TP_NAME,
.desc = "Touchpad",
@@ -250,8 +255,8 @@ static int ec_device_probe(struct platform_device *pdev)
* The PCHG device cannot be detected by sending EC_FEATURE_GET_CMD, but
* it can be detected by querying the number of peripheral chargers.
*/
- retval = cros_ec_command(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
- &pchg_count, sizeof(pchg_count));
+ retval = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
+ &pchg_count, sizeof(pchg_count));
if (retval >= 0 && pchg_count.port_count) {
retval = mfd_add_hotplug_devices(ec->dev,
cros_ec_pchg_cells,
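The rename above tracks the cros_ec_command() -> cros_ec_cmd() helper consolidation; the helper allocates the host-command message, sends it, and copies the response back. A sketch of the same PCHG query in isolation, with ec_dev assumed to come from the parent cros_ec device:

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

static int example_pchg_count(struct cros_ec_device *ec_dev)
{
	struct ec_response_pchg_count rsp;
	int ret;

	/* version 0, no request payload, response copied into rsp */
	ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
			  &rsp, sizeof(rsp));
	if (ret < 0)
		return ret;

	return rsp.port_count;	/* number of peripheral-charger ports */
}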
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 56c61c99eb23..27a881da4d6e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -798,7 +798,7 @@ void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
* @opp: The new ARM operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
- * This function sets the the operating point of the ARM.
+ * This function sets the operating point of the ARM.
*/
int db8500_prcmu_set_arm_opp(u8 opp)
{
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 852129ea0766..6cd0b0c752d6 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -91,11 +91,6 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
-enum dln2_endpoint {
- DLN2_EP_OUT = 0,
- DLN2_EP_IN = 1,
-};
-
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -777,16 +772,12 @@ static int dln2_probe(struct usb_interface *interface,
int ret;
int i, j;
- if (hostif->desc.bInterfaceNumber != 0 ||
- hostif->desc.bNumEndpoints < 2)
+ if (hostif->desc.bInterfaceNumber != 0)
return -ENODEV;
- epout = &hostif->endpoint[DLN2_EP_OUT].desc;
- if (!usb_endpoint_is_bulk_out(epout))
- return -ENODEV;
- epin = &hostif->endpoint[DLN2_EP_IN].desc;
- if (!usb_endpoint_is_bulk_in(epin))
- return -ENODEV;
+ ret = usb_find_common_endpoints(hostif, &epin, &epout, NULL, NULL);
+ if (ret)
+ return ret;
dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
if (!dln2)
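Rather than hard-coding endpoint slots, the dln2 probe above now asks the USB core for the first bulk-in/bulk-out pair, which also tolerates descriptors listed in either order. A minimal sketch, assuming hostif is the probed interface's current altsetting:

#include <linux/usb.h>

static int example_find_bulk_pair(struct usb_host_interface *hostif)
{
	struct usb_endpoint_descriptor *epin, *epout;
	int ret;

	/* fills only the requested classes; fails if a pair is missing */
	ret = usb_find_common_endpoints(hostif, &epin, &epout, NULL, NULL);
	if (ret)
		return ret;

	pr_info("bulk-in 0x%02x, bulk-out 0x%02x\n",
		epin->bEndpointAddress, epout->bEndpointAddress);
	return 0;
}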
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index f7950d2197df..bb08b7a73fe1 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -385,6 +385,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
+ /* MTL-P */
+ { PCI_VDEVICE(INTEL, 0x7e25), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e26), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e50), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e51), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e52), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e78), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e79), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7a), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7b), (kernel_ulong_t)&bxt_i2c_info },
/* LKF */
{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index bc069c4daa60..8dac0d41f64f 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -2,10 +2,11 @@
/*
* MFD core driver for Intel Broxton Whiskey Cove PMIC
*
- * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015-2017, 2022 Intel Corporation. All rights reserved.
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -18,9 +19,9 @@
#include <asm/intel_scu_ipc.h>
/* PMIC device registers */
-#define REG_ADDR_MASK 0xFF00
+#define REG_ADDR_MASK GENMASK(15, 8)
#define REG_ADDR_SHIFT 8
-#define REG_OFFSET_MASK 0xFF
+#define REG_OFFSET_MASK GENMASK(7, 0)
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
@@ -112,29 +113,29 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
+ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, BIT(0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
- REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
- REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, 0xff),
+ REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, GENMASK(7, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
- REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, GENMASK(4, 0)),
+ REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_tmu[] = {
- REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, 0x06),
+ REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, GENMASK(2, 1)),
};
static const struct regmap_irq bxtwc_regmap_irqs_crit[] = {
- REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, 0x03),
+ REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, GENMASK(1, 0)),
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
@@ -333,17 +334,19 @@ static unsigned long bxtwc_reg_addr;
static ssize_t addr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%lx\n", bxtwc_reg_addr);
+ return sysfs_emit(buf, "0x%lx\n", bxtwc_reg_addr);
}
static ssize_t addr_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- if (kstrtoul(buf, 0, &bxtwc_reg_addr)) {
- dev_err(dev, "Invalid register address\n");
- return -EINVAL;
- }
- return (ssize_t)count;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &bxtwc_reg_addr);
+ if (ret)
+ return ret;
+
+ return count;
}
static ssize_t val_show(struct device *dev,
@@ -354,12 +357,12 @@ static ssize_t val_show(struct device *dev,
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
ret = regmap_read(pmic->regmap, bxtwc_reg_addr, &val);
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "Failed to read 0x%lx\n", bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
- return sprintf(buf, "0x%02x\n", val);
+ return sysfs_emit(buf, "0x%02x\n", val);
}
static ssize_t val_store(struct device *dev,
@@ -377,7 +380,7 @@ static ssize_t val_store(struct device *dev,
if (ret) {
dev_err(dev, "Failed to write value 0x%02x to address 0x%lx",
val, bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
return count;
}
@@ -394,6 +397,11 @@ static const struct attribute_group bxtwc_group = {
.attrs = bxtwc_attrs,
};
+static const struct attribute_group *bxtwc_groups[] = {
+ &bxtwc_group,
+ NULL
+};
+
static const struct regmap_config bxtwc_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -410,12 +418,9 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
int irq;
irq = regmap_irq_get_virq(pdata, pirq);
- if (irq < 0) {
- dev_err(pmic->dev,
- "Failed to get parent vIRQ(%d) for chip %s, ret:%d\n",
- pirq, chip->name, irq);
- return irq;
- }
+ if (irq < 0)
+ return dev_err_probe(pmic->dev, irq, "Failed to get parent vIRQ(%d) for chip %s\n",
+ pirq, chip->name);
return devm_regmap_add_irq_chip(pmic->dev, pmic->regmap, irq, irq_flags,
0, chip, data);
@@ -423,25 +428,19 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
static int bxtwc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- acpi_handle handle;
acpi_status status;
unsigned long long hrv;
struct intel_soc_pmic *pmic;
- handle = ACPI_HANDLE(&pdev->dev);
- status = acpi_evaluate_integer(handle, "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != BROXTON_PMIC_WC_HRV) {
- dev_err(&pdev->dev, "Invalid PMIC hardware revision: %llu\n",
- hrv);
- return -ENODEV;
- }
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != BROXTON_PMIC_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -450,49 +449,39 @@ static int bxtwc_probe(struct platform_device *pdev)
return ret;
pmic->irq = ret;
- dev_set_drvdata(&pdev->dev, pmic);
- pmic->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pmic);
+ pmic->dev = dev;
- pmic->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ pmic->scu = devm_intel_scu_ipc_dev_get(dev);
if (!pmic->scu)
return -EPROBE_DEFER;
- pmic->regmap = devm_regmap_init(&pdev->dev, NULL, pmic,
- &bxtwc_regmap_config);
- if (IS_ERR(pmic->regmap)) {
- ret = PTR_ERR(pmic->regmap);
- dev_err(&pdev->dev, "Failed to initialise regmap: %d\n", ret);
- return ret;
- }
+ pmic->regmap = devm_regmap_init(dev, NULL, pmic, &bxtwc_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return dev_err_probe(dev, PTR_ERR(pmic->regmap), "Failed to initialise regmap\n");
- ret = devm_regmap_add_irq_chip(&pdev->dev, pmic->regmap, pmic->irq,
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
IRQF_ONESHOT | IRQF_SHARED,
0, &bxtwc_regmap_irq_chip,
&pmic->irq_chip_data);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_PWRBTN_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_pwrbtn,
&pmic->irq_chip_data_pwrbtn);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWRBTN IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_tmu,
&pmic->irq_chip_data_tmu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add TMU IRQ chip\n");
/* Add chained IRQ handler for BCU IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -500,12 +489,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_bcu,
&pmic->irq_chip_data_bcu);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add BUC IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add BUC IRQ chip\n");
/* Add chained IRQ handler for ADC IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -513,12 +498,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_adc,
&pmic->irq_chip_data_adc);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add ADC IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add ADC IRQ chip\n");
/* Add chained IRQ handler for CHGR IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -526,12 +507,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_chgr,
&pmic->irq_chip_data_chgr);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CHGR IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CHGR IRQ chip\n");
/* Add chained IRQ handler for CRIT IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -539,54 +516,33 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_crit,
&pmic->irq_chip_data_crit);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CRIT IRQ chip\n");
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CRIT IRQ chip\n");
- return ret;
- }
-
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
- ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add devices\n");
- return ret;
- }
-
- ret = sysfs_create_group(&pdev->dev.kobj, &bxtwc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n", ret);
- return ret;
- }
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, bxt_wc_dev, ARRAY_SIZE(bxt_wc_dev),
+ NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devices\n");
/*
- * There is known hw bug. Upon reset BIT 5 of register
+ * There is a known H/W bug. Upon reset, BIT 5 of register
* BXTWC_CHGR_LVL1_IRQ is 0 which is the expected value. However,
* later it's set to 1(masked) automatically by hardware. So we
- * have the software workaround here to unmaksed it in order to let
- * charger interrutp work.
+ * place the software workaround here to unmask it again in order
+ * to re-enable the charger interrupt.
*/
- regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1,
- BXTWC_MIRQLVL1_MCHGR, 0);
-
- return 0;
-}
-
-static int bxtwc_remove(struct platform_device *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &bxtwc_group);
+ regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1, BXTWC_MIRQLVL1_MCHGR, 0);
return 0;
}
static void bxtwc_shutdown(struct platform_device *pdev)
{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&pdev->dev);
+ struct intel_soc_pmic *pmic = platform_get_drvdata(pdev);
disable_irq(pmic->irq);
}
-#ifdef CONFIG_PM_SLEEP
static int bxtwc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -603,8 +559,8 @@ static int bxtwc_resume(struct device *dev)
enable_irq(pmic->irq);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
static const struct acpi_device_id bxtwc_acpi_ids[] = {
{ "INT34D3", },
@@ -614,16 +570,16 @@ MODULE_DEVICE_TABLE(acpi, bxtwc_acpi_ids);
static struct platform_driver bxtwc_driver = {
.probe = bxtwc_probe,
- .remove = bxtwc_remove,
.shutdown = bxtwc_shutdown,
.driver = {
.name = "BXTWC PMIC",
- .pm = &bxtwc_pm_ops,
- .acpi_match_table = ACPI_PTR(bxtwc_acpi_ids),
+ .pm = pm_sleep_ptr(&bxtwc_pm_ops),
+ .acpi_match_table = bxtwc_acpi_ids,
+ .dev_groups = bxtwc_groups,
},
};
module_platform_driver(bxtwc_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Qipeng Zha<qipeng.zha@intel.com>");
+MODULE_AUTHOR("Qipeng Zha <qipeng.zha@intel.com>");
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 4eab191e053a..9216f0d34206 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -179,18 +179,13 @@ static int cht_wc_probe(struct i2c_client *client)
int ret;
status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != CHT_WC_HRV) {
- dev_err(dev, "Invalid PMIC hardware revision: %llu\n", hrv);
- return -ENODEV;
- }
- if (client->irq < 0) {
- dev_err(dev, "Invalid IRQ\n");
- return -EINVAL;
- }
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != CHT_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
+
+ if (client->irq < 0)
+ return dev_err_probe(dev, -EINVAL, "Invalid IRQ\n");
pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
@@ -227,7 +222,7 @@ static void cht_wc_shutdown(struct i2c_client *client)
disable_irq(pmic->irq);
}
-static int __maybe_unused cht_wc_suspend(struct device *dev)
+static int cht_wc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -236,7 +231,7 @@ static int __maybe_unused cht_wc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused cht_wc_resume(struct device *dev)
+static int cht_wc_resume(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -244,7 +239,7 @@ static int __maybe_unused cht_wc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
static const struct i2c_device_id cht_wc_i2c_id[] = {
{ }
@@ -258,7 +253,7 @@ static const struct acpi_device_id cht_wc_acpi_ids[] = {
static struct i2c_driver cht_wc_driver = {
.driver = {
.name = "CHT Whiskey Cove PMIC",
- .pm = &cht_wc_pm_ops,
+ .pm = pm_sleep_ptr(&cht_wc_pm_ops),
.acpi_match_table = cht_wc_acpi_ids,
},
.probe_new = cht_wc_probe,
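Both PMIC drivers above move from SIMPLE_DEV_PM_OPS plus __maybe_unused/#ifdef CONFIG_PM_SLEEP guards to DEFINE_SIMPLE_DEV_PM_OPS() with pm_sleep_ptr(): the callbacks stay visible to the compiler, and the pointer collapses to NULL when sleep support is configured out, letting the linker drop the dead code. A skeleton with placeholder names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM_SLEEP=n; no #ifdef needed */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};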
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 9ffab9aafd81..650951f89f1c 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -8,7 +8,8 @@
* Configuration Registers.
*
* This driver is derived from lpc_sch.
-
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation
* Copyright (c) 2011 Extreme Engineering Solution, Inc.
* Author: Aaron Sierra <asierra@xes-inc.com>
*
@@ -42,9 +43,11 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/pinctrl/pinctrl.h>
#include <linux/mfd/core.h>
#include <linux/mfd/lpc_ich.h>
#include <linux/platform_data/itco_wdt.h>
+#include <linux/platform_data/x86/p2sb.h>
#define ACPIBASE 0x40
#define ACPIBASE_GPE_OFF 0x28
@@ -71,8 +74,6 @@
#define BCR 0xdc
#define BCR_WPD BIT(0)
-#define SPIBASE_APL_SZ 4096
-
#define GPIOBASE_ICH0 0x58
#define GPIOCTRL_ICH0 0x5C
#define GPIOBASE_ICH6 0x48
@@ -143,6 +144,73 @@ static struct mfd_cell lpc_ich_gpio_cell = {
.ignore_resource_conflicts = true,
};
+#define APL_GPIO_NORTH 0
+#define APL_GPIO_NORTHWEST 1
+#define APL_GPIO_WEST 2
+#define APL_GPIO_SOUTHWEST 3
+#define APL_GPIO_NR_DEVICES 4
+
+/* Offset data for Apollo Lake GPIO controllers */
+static resource_size_t apl_gpio_offsets[APL_GPIO_NR_DEVICES] = {
+ [APL_GPIO_NORTH] = 0xc50000,
+ [APL_GPIO_NORTHWEST] = 0xc40000,
+ [APL_GPIO_WEST] = 0xc70000,
+ [APL_GPIO_SOUTHWEST] = 0xc00000,
+};
+
+#define APL_GPIO_RESOURCE_SIZE 0x1000
+
+#define APL_GPIO_IRQ 14
+
+static struct resource apl_gpio_resources[APL_GPIO_NR_DEVICES][2] = {
+ [APL_GPIO_NORTH] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_NORTHWEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_WEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_SOUTHWEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+};
+
+static const struct mfd_cell apl_gpio_devices[APL_GPIO_NR_DEVICES] = {
+ [APL_GPIO_NORTH] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_NORTH,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_NORTH]),
+ .resources = apl_gpio_resources[APL_GPIO_NORTH],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_NORTHWEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_NORTHWEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_NORTHWEST]),
+ .resources = apl_gpio_resources[APL_GPIO_NORTHWEST],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_WEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_WEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_WEST]),
+ .resources = apl_gpio_resources[APL_GPIO_WEST],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_SOUTHWEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_SOUTHWEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_SOUTHWEST]),
+ .resources = apl_gpio_resources[APL_GPIO_SOUTHWEST],
+ .ignore_resource_conflicts = true,
+ },
+};
static struct mfd_cell lpc_ich_spi_cell = {
.name = "intel-spi",
@@ -1086,6 +1154,34 @@ wdt_done:
return ret;
}
+static int lpc_ich_init_pinctrl(struct pci_dev *dev)
+{
+ struct resource base;
+ unsigned int i;
+ int ret;
+
+ /* Check if GPIO has been exported as an ACPI device */
+ if (acpi_dev_present("INT3452", NULL, -1))
+ return -EEXIST;
+
+ ret = p2sb_bar(dev->bus, 0, &base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(apl_gpio_devices); i++) {
+ struct resource *mem = &apl_gpio_resources[i][0];
+ resource_size_t offset = apl_gpio_offsets[i];
+
+ /* Fill MEM resource */
+ mem->start = base.start + offset;
+ mem->end = base.start + offset + APL_GPIO_RESOURCE_SIZE - 1;
+ mem->flags = base.flags;
+ }
+
+ return mfd_add_devices(&dev->dev, 0, apl_gpio_devices,
+ ARRAY_SIZE(apl_gpio_devices), NULL, 0, NULL);
+}
+
static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
{
u32 val;
@@ -1100,35 +1196,32 @@ static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
return val & BYT_BCR_WPD;
}
-static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
+static bool lpc_ich_set_writeable(struct pci_bus *bus, unsigned int devfn)
{
- struct pci_dev *pdev = data;
u32 bcr;
- pci_read_config_dword(pdev, BCR, &bcr);
+ pci_bus_read_config_dword(bus, devfn, BCR, &bcr);
if (!(bcr & BCR_WPD)) {
bcr |= BCR_WPD;
- pci_write_config_dword(pdev, BCR, bcr);
- pci_read_config_dword(pdev, BCR, &bcr);
+ pci_bus_write_config_dword(bus, devfn, BCR, bcr);
+ pci_bus_read_config_dword(bus, devfn, BCR, &bcr);
}
return bcr & BCR_WPD;
}
-static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
{
- unsigned int spi = PCI_DEVFN(13, 2);
- struct pci_bus *bus = data;
- u32 bcr;
+ struct pci_dev *pdev = data;
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
+ return lpc_ich_set_writeable(pdev->bus, pdev->devfn);
+}
- return bcr & BCR_WPD;
+static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+
+ return lpc_ich_set_writeable(pdev->bus, PCI_DEVFN(13, 2));
}
static int lpc_ich_init_spi(struct pci_dev *dev)
@@ -1137,6 +1230,7 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
struct resource *res = &intel_spi_res[0];
struct intel_spi_boardinfo *info;
u32 spi_base, rcba;
+ int ret;
info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1167,30 +1261,19 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
}
break;
- case INTEL_SPI_BXT: {
- unsigned int p2sb = PCI_DEVFN(13, 0);
- unsigned int spi = PCI_DEVFN(13, 2);
- struct pci_bus *bus = dev->bus;
-
+ case INTEL_SPI_BXT:
/*
* The P2SB is hidden by BIOS and we need to unhide it in
* order to read BAR of the SPI flash device. Once that is
* done we hide it again.
*/
- pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x0);
- pci_bus_read_config_dword(bus, spi, PCI_BASE_ADDRESS_0,
- &spi_base);
- if (spi_base != ~0) {
- res->start = spi_base & 0xfffffff0;
- res->end = res->start + SPIBASE_APL_SZ - 1;
-
- info->set_writeable = lpc_ich_bxt_set_writeable;
- info->data = bus;
- }
+ ret = p2sb_bar(dev->bus, PCI_DEVFN(13, 2), res);
+ if (ret)
+ return ret;
- pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
+ info->set_writeable = lpc_ich_bxt_set_writeable;
+ info->data = dev;
break;
- }
default:
return -EINVAL;
@@ -1249,6 +1332,12 @@ static int lpc_ich_probe(struct pci_dev *dev,
cell_added = true;
}
+ if (priv->chipset == LPC_APL) {
+ ret = lpc_ich_init_pinctrl(dev);
+ if (!ret)
+ cell_added = true;
+ }
+
if (lpc_chipset_info[priv->chipset].spi_type) {
ret = lpc_ich_init_spi(dev);
if (!ret)
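Both new lpc_ich paths above (Apollo Lake pinctrl and the BXT SPI BAR) go through p2sb_bar() from the x86 P2SB library, which briefly unhides the BIOS-hidden P2SB device, reads the requested function's BAR0 into a struct resource, and hides it again. A sketch of a lookup, with devfn 0 standing in for the P2SB device itself:

#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>

static int example_p2sb_lookup(struct pci_bus *bus)
{
	struct resource mem;
	int ret;

	ret = p2sb_bar(bus, 0, &mem);	/* unhide, read BAR0, re-hide */
	if (ret)
		return ret;

	pr_info("P2SB BAR: %pR\n", &mem);
	return 0;
}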
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index fec2096474ad..a6661e07035b 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -419,9 +419,11 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
of_node_put(fps_child);
+ of_node_put(fps_np);
return ret;
}
}
+ of_node_put(fps_np);
config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
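The max77620 fix above is the standard OF refcount rule: a node obtained from of_get_child_by_name() (and the child handed out by the iterator on an early exit) must be put on every path, success included. A generic sketch of the early-exit case:

#include <linux/of.h>

static int example_walk_children(struct device_node *parent)
{
	struct device_node *np;
	u32 val;
	int ret;

	for_each_child_of_node(parent, np) {
		ret = of_property_read_u32(np, "reg", &val);
		if (ret) {
			of_node_put(np);	/* iterator ref leaks otherwise */
			return ret;
		}
	}

	return 0;	/* normal termination already dropped the last ref */
}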
diff --git a/drivers/mfd/max77714.c b/drivers/mfd/max77714.c
index d1e4247800d2..143a432ea343 100644
--- a/drivers/mfd/max77714.c
+++ b/drivers/mfd/max77714.c
@@ -3,7 +3,7 @@
* Maxim MAX77714 Core Driver
*
* Copyright (C) 2022 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -148,5 +148,5 @@ static struct i2c_driver max77714_driver = {
module_i2c_driver(max77714_driver);
MODULE_DESCRIPTION("Maxim MAX77714 MFD core driver");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index ea5e452510eb..389756436af6 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -3,6 +3,8 @@
// Copyright (c) 2020 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/mfd/mt6357/core.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/core.h>
@@ -17,6 +19,17 @@
#define MTK_PMIC_REG_WIDTH 16
+static const struct irq_top_t mt6357_ints[] = {
+ MT6357_TOP_GEN(BUCK),
+ MT6357_TOP_GEN(LDO),
+ MT6357_TOP_GEN(PSC),
+ MT6357_TOP_GEN(SCK),
+ MT6357_TOP_GEN(BM),
+ MT6357_TOP_GEN(HK),
+ MT6357_TOP_GEN(AUD),
+ MT6357_TOP_GEN(MISC),
+};
+
static const struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(BUCK),
MT6358_TOP_GEN(LDO),
@@ -39,6 +52,13 @@ static const struct irq_top_t mt6359_ints[] = {
MT6359_TOP_GEN(MISC),
};
+static struct pmic_irq_data mt6357_irqd = {
+ .num_top = ARRAY_SIZE(mt6357_ints),
+ .num_pmic_irqs = MT6357_IRQ_NR,
+ .top_int_status_reg = MT6357_TOP_INT_STATUS0,
+ .pmic_ints = mt6357_ints,
+};
+
static struct pmic_irq_data mt6358_irqd = {
.num_top = ARRAY_SIZE(mt6358_ints),
.num_pmic_irqs = MT6358_IRQ_NR,
@@ -211,6 +231,10 @@ int mt6358_irq_init(struct mt6397_chip *chip)
struct pmic_irq_data *irqd;
switch (chip->chip_id) {
+ case MT6357_CHIP_ID:
+ chip->irq_data = &mt6357_irqd;
+ break;
+
case MT6358_CHIP_ID:
case MT6366_CHIP_ID:
chip->irq_data = &mt6358_irqd;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 1a368ad08f58..f6c1f80f94a4 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -12,10 +12,14 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6357/core.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6359/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/registers.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/registers.h>
@@ -23,6 +27,12 @@
#define MT6323_RTC_BASE 0x8000
#define MT6323_RTC_SIZE 0x40
+#define MT6357_RTC_BASE 0x0588
+#define MT6357_RTC_SIZE 0x3c
+
+#define MT6331_RTC_BASE 0x4000
+#define MT6331_RTC_SIZE 0x40
+
#define MT6358_RTC_BASE 0x0588
#define MT6358_RTC_SIZE 0x3c
@@ -37,6 +47,16 @@ static const struct resource mt6323_rtc_resources[] = {
DEFINE_RES_IRQ(MT6323_IRQ_STATUS_RTC),
};
+static const struct resource mt6357_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6357_RTC_BASE, MT6357_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6357_IRQ_RTC),
+};
+
+static const struct resource mt6331_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6331_RTC_BASE, MT6331_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6331_IRQ_STATUS_RTC),
+};
+
static const struct resource mt6358_rtc_resources[] = {
DEFINE_RES_MEM(MT6358_RTC_BASE, MT6358_RTC_SIZE),
DEFINE_RES_IRQ(MT6358_IRQ_RTC),
@@ -66,6 +86,18 @@ static const struct resource mt6323_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"),
};
+static const struct resource mt6357_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY, "homekey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY_R, "powerkey_r"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY_R, "homekey_r"),
+};
+
+static const struct resource mt6331_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_HOMEKEY, "homekey"),
+};
+
static const struct resource mt6397_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_PWRKEY, "powerkey"),
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_HOMEKEY, "homekey"),
@@ -100,6 +132,43 @@ static const struct mfd_cell mt6323_devs[] = {
},
};
+static const struct mfd_cell mt6357_devs[] = {
+ {
+ .name = "mt6357-regulator",
+ }, {
+ .name = "mt6357-rtc",
+ .num_resources = ARRAY_SIZE(mt6357_rtc_resources),
+ .resources = mt6357_rtc_resources,
+ .of_compatible = "mediatek,mt6357-rtc",
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6357_keys_resources),
+ .resources = mt6357_keys_resources,
+ .of_compatible = "mediatek,mt6357-keys"
+ },
+};
+
+/* MT6331 is always used in combination with MT6332 */
+static const struct mfd_cell mt6331_mt6332_devs[] = {
+ {
+ .name = "mt6331-rtc",
+ .num_resources = ARRAY_SIZE(mt6331_rtc_resources),
+ .resources = mt6331_rtc_resources,
+ .of_compatible = "mediatek,mt6331-rtc",
+ }, {
+ .name = "mt6331-regulator",
+ .of_compatible = "mediatek,mt6331-regulator"
+ }, {
+ .name = "mt6332-regulator",
+ .of_compatible = "mediatek,mt6332-regulator"
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6331_keys_resources),
+ .resources = mt6331_keys_resources,
+ .of_compatible = "mediatek,mt6331-keys"
+ },
+};
+
static const struct mfd_cell mt6358_devs[] = {
{
.name = "mt6358-regulator",
@@ -179,6 +248,22 @@ static const struct chip_data mt6323_core = {
.irq_init = mt6397_irq_init,
};
+static const struct chip_data mt6357_core = {
+ .cid_addr = MT6357_SWCID,
+ .cid_shift = 8,
+ .cells = mt6357_devs,
+ .cell_size = ARRAY_SIZE(mt6357_devs),
+ .irq_init = mt6358_irq_init,
+};
+
+static const struct chip_data mt6331_mt6332_core = {
+ .cid_addr = MT6331_HWCID,
+ .cid_shift = 0,
+ .cells = mt6331_mt6332_devs,
+ .cell_size = ARRAY_SIZE(mt6331_mt6332_devs),
+ .irq_init = mt6397_irq_init,
+};
+
static const struct chip_data mt6358_core = {
.cid_addr = MT6358_SWCID,
.cid_shift = 8,
@@ -262,6 +347,12 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6323",
.data = &mt6323_core,
}, {
+ .compatible = "mediatek,mt6331",
+ .data = &mt6331_mt6332_core,
+ }, {
+ .compatible = "mediatek,mt6357",
+ .data = &mt6357_core,
+ }, {
.compatible = "mediatek,mt6358",
.data = &mt6358_core,
}, {
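The MT6331/MT6357 additions above follow the mt6397 core's match-data pattern: per-chip constants hang off of_device_id.data and are fetched once at probe time. A reduced sketch with invented names:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_chip_data {
	unsigned int cid_addr;	/* chip-ID register, illustrative */
};

static const struct example_chip_data example_core = {
	.cid_addr = 0x200,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-pmic", .data = &example_core },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_chip_data *cd;

	cd = of_device_get_match_data(&pdev->dev);
	if (!cd)
		return -EINVAL;

	return 0;
}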
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index 2924919da991..eff53fed8fe7 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -12,6 +12,8 @@
#include <linux/suspend.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
@@ -172,7 +174,12 @@ int mt6397_irq_init(struct mt6397_chip *chip)
chip->int_status[0] = MT6323_INT_STATUS0;
chip->int_status[1] = MT6323_INT_STATUS1;
break;
-
+ case MT6331_CHIP_ID:
+ chip->int_con[0] = MT6331_INT_CON0;
+ chip->int_con[1] = MT6331_INT_CON1;
+ chip->int_status[0] = MT6331_INT_STATUS_CON0;
+ chip->int_status[1] = MT6331_INT_STATUS_CON1;
+ break;
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
chip->int_con[0] = MT6397_INT_CON0;
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index c472d7f8103c..4b8ff947762f 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -54,13 +54,6 @@ enum {
#define PM8008_PERIPH_OFFSET(paddr) (paddr - PM8008_PERIPH_0_BASE)
-struct pm8008_data {
- struct device *dev;
- struct regmap *regmap;
- int irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
static unsigned int p0_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_0_BASE)};
static unsigned int p1_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_1_BASE)};
static unsigned int p2_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_2_BASE)};
@@ -150,7 +143,7 @@ static struct regmap_config qcom_mfd_regmap_cfg = {
.max_register = 0xFFFF,
};
-static int pm8008_init(struct pm8008_data *chip)
+static int pm8008_init(struct regmap *regmap)
{
int rc;
@@ -160,34 +153,31 @@ static int pm8008_init(struct pm8008_data *chip)
* This is required to enable the writing of TYPE registers in
* regmap_irq_sync_unlock().
*/
- rc = regmap_write(chip->regmap,
- (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET),
- BIT(0));
+ rc = regmap_write(regmap, (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
/* Do the same for GPIO1 and GPIO2 peripherals */
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
return rc;
}
-static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
+static int pm8008_probe_irq_peripherals(struct device *dev,
+ struct regmap *regmap,
int client_irq)
{
int rc, i;
struct regmap_irq_type *type;
struct regmap_irq_chip_data *irq_data;
- rc = pm8008_init(chip);
+ rc = pm8008_init(regmap);
if (rc) {
- dev_err(chip->dev, "Init failed: %d\n", rc);
+ dev_err(dev, "Init failed: %d\n", rc);
return rc;
}
@@ -207,10 +197,10 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW);
}
- rc = devm_regmap_add_irq_chip(chip->dev, chip->regmap, client_irq,
+ rc = devm_regmap_add_irq_chip(dev, regmap, client_irq,
IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data);
if (rc) {
- dev_err(chip->dev, "Failed to add IRQ chip: %d\n", rc);
+ dev_err(dev, "Failed to add IRQ chip: %d\n", rc);
return rc;
}
@@ -220,26 +210,23 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
static int pm8008_probe(struct i2c_client *client)
{
int rc;
- struct pm8008_data *chip;
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
+ struct device *dev;
+ struct regmap *regmap;
- chip->dev = &client->dev;
- chip->regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
- if (!chip->regmap)
+ dev = &client->dev;
+ regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
+ if (!regmap)
return -ENODEV;
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, regmap);
- if (of_property_read_bool(chip->dev->of_node, "interrupt-controller")) {
- rc = pm8008_probe_irq_peripherals(chip, client->irq);
+ if (of_property_read_bool(dev->of_node, "interrupt-controller")) {
+ rc = pm8008_probe_irq_peripherals(dev, regmap, client->irq);
if (rc)
- dev_err(chip->dev, "Failed to probe irq periphs: %d\n", rc);
+ dev_err(dev, "Failed to probe irq periphs: %d\n", rc);
}
- return devm_of_platform_populate(chip->dev);
+ return devm_of_platform_populate(dev);
}
static const struct of_device_id pm8008_match[] = {
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 191fdb87c424..bdb2ce7ff03b 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -101,8 +101,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
}
}
- syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np,
- (u64)res.start);
+ syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
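The syscon one-liner above swaps a u64 cast for the %pa specifier, which prints a phys_addr_t/resource_size_t at its native width and, unlike the cast, stays correct if the type ever changes; note it takes a pointer to the value. In isolation:

#include <linux/ioport.h>
#include <linux/printk.h>

static void example_print_resource(const struct resource *res)
{
	/* %pa dereferences its argument; pass &res->start, not res->start */
	pr_info("start %pa, range %pR\n", &res->start, res);
}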
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 5369c67e3280..663ffd4b8570 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -397,11 +397,8 @@ err_noirq:
static int t7l66xb_remove(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- int ret;
- ret = pdata->disable(dev);
clk_disable_unprepare(t7l66xb->clk48m);
clk_put(t7l66xb->clk48m);
clk_disable_unprepare(t7l66xb->clk32k);
@@ -412,8 +409,7 @@ static int t7l66xb_remove(struct platform_device *dev)
mfd_remove_devices(&dev->dev);
kfree(t7l66xb);
- return ret;
-
+ return 0;
}
static struct platform_driver t7l66xb_platform_driver = {
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 0be5731685b4..aa903a31dd43 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -798,20 +798,19 @@ static int tc6393xb_remove(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- int ret;
mfd_remove_devices(&dev->dev);
tc6393xb_detach_irq(dev);
- ret = tcpd->disable(dev);
+ tcpd->disable(dev);
clk_disable_unprepare(tc6393xb->clk);
iounmap(tc6393xb->scr);
release_resource(&tc6393xb->rscr);
clk_put(tc6393xb->clk);
kfree(tc6393xb);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bd6659cf3bc0..2cb9326f3e61 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -656,309 +656,6 @@ static inline struct device *add_child(unsigned mod_no, const char *name,
can_wakeup, irq0, irq1);
}
-static struct device *
-add_regulator_linked(int num, struct regulator_init_data *pdata,
- struct regulator_consumer_supply *consumers,
- unsigned num_consumers, unsigned long features)
-{
- struct twl_regulator_driver_data drv_data;
-
- /* regulator framework demands init_data ... */
- if (!pdata)
- return NULL;
-
- if (consumers) {
- pdata->consumer_supplies = consumers;
- pdata->num_consumer_supplies = num_consumers;
- }
-
- if (pdata->driver_data) {
- /* If we have existing drv_data, just add the flags */
- struct twl_regulator_driver_data *tmp;
- tmp = pdata->driver_data;
- tmp->features |= features;
- } else {
- /* add new driver data struct, used only during init */
- drv_data.features = features;
- drv_data.set_voltage = NULL;
- drv_data.get_voltage = NULL;
- drv_data.data = NULL;
- pdata->driver_data = &drv_data;
- }
-
- /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- return add_numbered_child(TWL_MODULE_PM_MASTER, "twl_reg", num,
- pdata, sizeof(*pdata), false, 0, 0);
-}
-
-static struct device *
-add_regulator(int num, struct regulator_init_data *pdata,
- unsigned long features)
-{
- return add_regulator_linked(num, pdata, NULL, 0, features);
-}
-
-/*
- * NOTE: We know the first 8 IRQs after pdata->base_irq are
- * for the PIH, and the next are for the PWR_INT SIH, since
- * that's how twl_init_irq() sets things up.
- */
-
-static int
-add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
- unsigned long features)
-{
- struct device *child;
-
- if (IS_ENABLED(CONFIG_GPIO_TWL4030) && pdata->gpio) {
- child = add_child(TWL4030_MODULE_GPIO, "twl4030_gpio",
- pdata->gpio, sizeof(*pdata->gpio),
- false, irq_base + GPIO_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_KEYBOARD_TWL4030) && pdata->keypad) {
- child = add_child(TWL4030_MODULE_KEYPAD, "twl4030_keypad",
- pdata->keypad, sizeof(*pdata->keypad),
- true, irq_base + KEYPAD_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_MADC, "twl4030_madc",
- pdata->madc, sizeof(*pdata->madc),
- true, irq_base + MADC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_RTC_DRV_TWL4030)) {
- /*
- * REVISIT platform_data here currently might expose the
- * "msecure" line ... but for now we just expect board
- * setup to tell the chip "it's always ok to SET_TIME".
- * Eventually, Linux might become more aware of such
- * HW security concerns, and "least privilege".
- */
- child = add_child(TWL_MODULE_RTC, "twl_rtc", NULL, 0,
- true, irq_base + RTC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL)) {
- child = add_child(TWL_MODULE_PWM, "twl-pwm", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
- child = add_child(TWL_MODULE_LED, "twl-pwmled", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_USB) && pdata->usb &&
- twl_class_is_4030()) {
-
- static struct regulator_consumer_supply usb1v5 = {
- .supply = "usb1v5",
- };
- static struct regulator_consumer_supply usb1v8 = {
- .supply = "usb1v8",
- };
- static struct regulator_consumer_supply usb3v1 = {
- .supply = "usb3v1",
- };
-
- /* First add the regulators so that they can be used by transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030)) {
- /* this is a template that gets copied */
- struct regulator_init_data usb_fixed = {
- .constraints.valid_modes_mask =
- REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .constraints.valid_ops_mask =
- REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- };
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V5,
- &usb_fixed, &usb1v5, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V8,
- &usb_fixed, &usb1v8, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB3V1,
- &usb_fixed, &usb3v1, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- }
-
- child = add_child(TWL_MODULE_USB, "twl4030_usb",
- pdata->usb, sizeof(*pdata->usb), true,
- /* irq0 = USB_PRES, irq1 = USB */
- irq_base + USB_PRES_INTR_OFFSET,
- irq_base + USB_INTR_OFFSET);
-
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- /* we need to connect regulators to this transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child) {
- usb1v5.dev_name = dev_name(child);
- usb1v8.dev_name = dev_name(child);
- usb3v1.dev_name = dev_name(child);
- }
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL,
- 0, false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_pwrbutton",
- NULL, 0, true, irq_base + 8 + 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_AUDIO_VOICE, "twl4030-audio",
- pdata->audio, sizeof(*pdata->audio),
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* twl4030 regulators */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VIO, pdata->vio,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDAC, pdata->vdac,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator((features & TWL4030_VAUX2)
- ? TWL4030_REG_VAUX2_4030
- : TWL4030_REG_VAUX2,
- pdata->vaux2, features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* maybe add LDOs that are omitted on cost-reduced parts */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && !(features & TPS_SUBSET)
- && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VSIM, pdata->vsim,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
- !(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci",
- pdata->bci, sizeof(*pdata->bci), false,
- /* irq0 = CHG_PRES, irq1 = BCI */
- irq_base + BCI_PRES_INTR_OFFSET,
- irq_base + BCI_INTR_OFFSET);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata->power) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_power",
- pdata->power, sizeof(*pdata->power), false,
- 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- return 0;
-}
-
/*----------------------------------------------------------------------*/
/*
@@ -987,8 +684,7 @@ static inline int unprotect_pm_master(void)
return e;
}
-static void clocks_init(struct device *dev,
- struct twl4030_clock_init_data *clock)
+static void clocks_init(struct device *dev)
{
int e = 0;
struct clk *osc;
@@ -1018,8 +714,6 @@ static void clocks_init(struct device *dev,
}
ctrl |= HIGH_PERF_SQ;
- if (clock && clock->ck32k_lowpwr_enable)
- ctrl |= CK32K_LOWPWR_EN;
e |= unprotect_pm_master();
/* effect->MADC+USB ck en */
@@ -1063,7 +757,6 @@ static struct of_dev_auxdata twl_auxdata_lookup[] = {
static int
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
const struct regmap_config *twl_regmap_config;
@@ -1071,7 +764,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i, num_slaves;
- if (!node && !pdata) {
+ if (!node) {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
@@ -1161,7 +854,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_priv->ready = true;
/* setup clock framework */
- clocks_init(&client->dev, pdata ? pdata->clock : NULL);
+ clocks_init(&client->dev);
/* read TWL IDCODE Register */
if (twl_class_is_4030()) {
@@ -1209,14 +902,8 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
TWL4030_DCDC_GLOBAL_CFG);
}
- if (node) {
- if (pdata)
- twl_auxdata_lookup[0].platform_data = pdata->gpio;
- status = of_platform_populate(node, NULL, twl_auxdata_lookup,
- &client->dev);
- } else {
- status = add_children(pdata, irq_base, id->driver_data);
- }
+ status = of_platform_populate(node, NULL, twl_auxdata_lookup,
+ &client->dev);
fail:
if (status < 0)
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index 8c3832a58ef6..ac1d18039568 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -72,11 +72,9 @@ static int ucb1400_core_probe(struct device *dev)
/* GPIO */
ucb_gpio.ac97 = ac97;
- if (pdata) {
- ucb_gpio.gpio_setup = pdata->gpio_setup;
- ucb_gpio.gpio_teardown = pdata->gpio_teardown;
+ if (pdata)
ucb_gpio.gpio_offset = pdata->gpio_offset;
- }
+
ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
if (!ucb->ucb1400_gpio) {
err = -ENOMEM;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 85dd6aa33df6..61a2be712bf7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1585,7 +1585,7 @@ static int vmballoon_register_shrinker(struct vmballoon *b)
b->shrinker.count_objects = vmballoon_shrinker_count;
b->shrinker.seeks = DEFAULT_SEEKS;
- r = register_shrinker(&b->shrinker);
+ r = register_shrinker(&b->shrinker, "vmw-balloon");
if (r == 0)
b->shrinker_registered = true;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index e08e22f0a7c5..ce89611a136e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -176,7 +176,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
@@ -1302,7 +1302,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr_p,
+ int recovery_mode, bool *do_rel_wr_p,
bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
@@ -1368,12 +1368,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks--;
/*
- * After a read error, we redo the request one sector
+ * After a read error, we redo the request one (native) sector
* at a time in order to accurately determine which
* sectors can be read successfully.
*/
- if (disable_multi)
- brq->data.blocks = 1;
+ if (recovery_mode)
+ brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
/*
* Some controllers have HW issues while operating
@@ -1590,7 +1590,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq)
{
u32 readcmd, writecmd;
@@ -1599,7 +1599,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
- mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+ mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
brq->mrq.cmd = &brq->cmd;
@@ -1690,7 +1690,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
#define MMC_READ_SINGLE_RETRIES 2
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1698,6 +1698,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
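+ /* Step recovery reads by the native (physical) sector size rather than a fixed 512 bytes */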
+ size_t bytes_per_read = queue_physical_block_size(mq->queue);
do {
u32 status;
@@ -1732,13 +1733,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
else
error = BLK_STS_OK;
- } while (blk_update_request(req, error, 512));
+ } while (blk_update_request(req, error, bytes_per_read));
return;
error_exit:
mrq->data->bytes_xfered = 0;
- blk_update_request(req, BLK_STS_IOERR, 512);
+ blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
/* Let it try the remaining request again */
if (mqrq->retries > MMC_MAX_RETRIES - 1)
mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1879,10 +1880,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
return;
}
- /* FIXME: Missing single sector read for large sector size */
- if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
- brq->data.blocks > 1) {
- /* Read one sector at a time */
+ if (rq_data_dir(req) == READ && brq->data.blocks >
+ queue_physical_block_size(mq->queue) >> 9) {
+ /* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
return;
}
@@ -2988,7 +2988,7 @@ static int mmc_blk_probe(struct mmc_card *card)
* Don't enable runtime PM for SD-combo cards here. Leave that
* decision to be taken during the SDIO init sequence instead.
*/
- if (card->type != MMC_TYPE_SD_COMBO) {
+ if (!mmc_card_sd_combo(card)) {
pm_runtime_set_active(&card->dev);
pm_runtime_enable(&card->dev);
}
@@ -3015,7 +3015,7 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_blk_part_switch(card, md->part_type);
mmc_release_host(card->host);
}
- if (card->type != MMC_TYPE_SD_COMBO)
+ if (!mmc_card_sd_combo(card))
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 58a60afa650b..d8762fa3d5cd 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -85,7 +85,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sdio(card) || mmc_card_sd_combo(card)) {
retval = add_uevent_var(env, "SDIO_ID=%04X:%04X",
card->cis.vendor, card->cis.device);
if (retval)
@@ -107,7 +107,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
* SDIO (non-combo) cards are not handled by mmc_block driver and do not
* have an accessible CID register, which is used by mmc_card_name().
*/
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
return 0;
retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 4b70cbfc6d5d..ef53a2578824 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -943,9 +943,11 @@ int mmc_execute_tuning(struct mmc_card *card)
}
/* Only print error when we don't check for card removal */
- if (!host->detect_change)
+ if (!host->detect_change) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
+ mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
+ }
return err;
}
@@ -2244,6 +2246,12 @@ void mmc_rescan(struct work_struct *work)
if (freqs[i] <= host->f_min)
break;
}
+
+ /*
+ * Ignore the command timeout errors observed during
+ * the card init, as those are expected.
+ */
+ host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
mmc_release_host(host);
out:
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 3fdbc801e64a..fe6808771bc7 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -223,6 +223,81 @@ static int mmc_clock_opt_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
+static int mmc_err_state_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+ int i;
+
+ if (!host)
+ return -EINVAL;
+
+ *val = 0;
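+ /* err_state reads back 1 if any error counter is non-zero, 0 otherwise */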
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (host->err_stats[i]) {
+ *val = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_err_state, mmc_err_state_get, NULL, "%llu\n");
+
+static int mmc_err_stats_show(struct seq_file *file, void *data)
+{
+ struct mmc_host *host = (struct mmc_host *)file->private;
+ const char *desc[MMC_ERR_MAX] = {
+ [MMC_ERR_CMD_TIMEOUT] = "Command Timeout Occurred",
+ [MMC_ERR_CMD_CRC] = "Command CRC Errors Occurred",
+ [MMC_ERR_DAT_TIMEOUT] = "Data Timeout Occurred",
+ [MMC_ERR_DAT_CRC] = "Data CRC Errors Occurred",
+ [MMC_ERR_AUTO_CMD] = "Auto-Cmd Error Occurred",
+ [MMC_ERR_ADMA] = "ADMA Error Occurred",
+ [MMC_ERR_TUNING] = "Tuning Error Occurred",
+ [MMC_ERR_CMDQ_RED] = "CMDQ RED Errors",
+ [MMC_ERR_CMDQ_GCE] = "CMDQ GCE Errors",
+ [MMC_ERR_CMDQ_ICCE] = "CMDQ ICCE Errors",
+ [MMC_ERR_REQ_TIMEOUT] = "Request Timed Out",
+ [MMC_ERR_CMDQ_REQ_TIMEOUT] = "CMDQ Request Timed Out",
+ [MMC_ERR_ICE_CFG] = "ICE Config Errors",
+ [MMC_ERR_CTRL_TIMEOUT] = "Controller Timeout Errors",
+ [MMC_ERR_UNEXPECTED_IRQ] = "Unexpected IRQ Errors",
+ };
+ int i;
+
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (desc[i])
+ seq_printf(file, "# %s:\t %d\n",
+ desc[i], host->err_stats[i]);
+ }
+
+ return 0;
+}
+
+static int mmc_err_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_err_stats_show, inode->i_private);
+}
+
+static ssize_t mmc_err_stats_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_host *host = filp->f_mapping->host->i_private;
+
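+ /* Any write clears every counter; the written data itself is ignored */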
+ pr_debug("%s: Resetting MMC error statistics\n", __func__);
+ memset(host->err_stats, 0, sizeof(host->err_stats));
+
+ return cnt;
+}
+
+static const struct file_operations mmc_err_stats_fops = {
+ .open = mmc_err_stats_open,
+ .read = seq_read,
+ .write = mmc_err_stats_write,
+ .release = single_release,
+};
+
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -236,6 +311,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
&mmc_clock_fops);
+ debugfs_create_file_unsafe("err_state", 0600, root, host,
+ &mmc_err_state);
+ debugfs_create_file("err_stats", 0600, root, host,
+ &mmc_err_stats_fops);
+
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
setup_fault_attr(&fail_default_attr, fail_request);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2ed2b4d5e5a5..0fd91f749b3a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -599,7 +599,7 @@ static int mmc_validate_host_caps(struct mmc_host *host)
}
if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
- !(caps & MMC_CAP_8_BIT_DATA)) {
+ !(caps & MMC_CAP_8_BIT_DATA) && !(caps2 & MMC_CAP2_NO_MMC)) {
dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
}
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f879dc63d936..be4393988086 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -163,8 +163,10 @@ static inline bool mmc_fixup_of_compatible_match(struct mmc_card *card,
struct device_node *np;
for_each_child_of_node(mmc_dev(card->host)->of_node, np) {
- if (of_device_is_compatible(np, compatible))
+ if (of_device_is_compatible(np, compatible)) {
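+ /* Drop the node reference held by for_each_child_of_node() before returning */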
+ of_node_put(np);
return true;
+ }
}
return false;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c5f1df6ce4c0..cee4c0b59f43 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -793,7 +793,7 @@ static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
attr == &dev_attr_info2.attr ||
attr == &dev_attr_info3.attr ||
attr == &dev_attr_info4.attr
- ) && card->type != MMC_TYPE_SD_COMBO)
+ ) &&!mmc_card_sd_combo(card))
return 0;
return attr->mode;
@@ -870,7 +870,7 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
+ if (!mmc_host_is_spi(host) && rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 25799accf8a0..0b682a31cd3e 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -226,6 +226,20 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTERRUPT_EXT, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_INTERRUPT_EXT_SAI) {
+ data |= SDIO_INTERRUPT_EXT_EAI;
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_INTERRUPT_EXT,
+ data, NULL);
+ if (ret)
+ goto out;
+
+ card->cccr.enable_async_irq = 1;
+ }
}
/* if no uhs mode ensure we check for high speed */
@@ -335,7 +349,7 @@ static int sdio_disable_4bit_bus(struct mmc_card *card)
{
int err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
@@ -360,7 +374,7 @@ static int sdio_enable_4bit_bus(struct mmc_card *card)
err = sdio_enable_wide(card);
if (err <= 0)
return err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) {
@@ -415,7 +429,7 @@ static int sdio_enable_hs(struct mmc_card *card)
int ret;
ret = mmc_sdio_switch_hs(card, true);
- if (ret <= 0 || card->type == MMC_TYPE_SDIO)
+ if (ret <= 0 || mmc_card_sdio(card))
return ret;
ret = mmc_sd_switch_hs(card);
@@ -441,7 +455,7 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
max_dtr = card->cis.max_dtr;
}
- if (card->type == MMC_TYPE_SD_COMBO)
+ if (mmc_card_sd_combo(card))
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
return max_dtr;
@@ -689,7 +703,7 @@ try_again:
mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
- if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ if (oldcard && (!mmc_card_sd_combo(oldcard) ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
err = -ENOENT;
goto mismatch;
@@ -697,7 +711,7 @@ try_again:
} else {
card->type = MMC_TYPE_SDIO;
- if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+ if (oldcard && !mmc_card_sdio(oldcard)) {
err = -ENOENT;
goto mismatch;
}
@@ -754,7 +768,7 @@ try_again:
/*
* Read CSD, before selecting the card
*/
- if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
+ if (!oldcard && mmc_card_sd_combo(card)) {
err = mmc_sd_get_csd(card);
if (err)
goto remove;
@@ -827,7 +841,7 @@ try_again:
mmc_fixup_device(card, sdio_fixup_methods);
- if (card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sd_combo(card)) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
/* handle as SDIO-only card if memory init failed */
if (err) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 10c563999d3d..e63608834411 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -171,6 +171,7 @@ config MMC_SDHCI_OF_ASPEED
config MMC_SDHCI_OF_ASPEED_TEST
bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
depends on MMC_SDHCI_OF_ASPEED && KUNIT
+ depends on (MMC_SDHCI_OF_ASPEED=m || KUNIT=y)
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the ASPEED SDHCI driver. Select this
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 2c4b2df52adb..12dca91a8ef6 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -277,6 +277,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Error populating slots\n");
octeon_mmc_set_shared_power(host, 0);
+ of_node_put(cn);
goto error;
}
i++;
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 76013bbbcff3..202b1d6da678 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -142,8 +142,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
continue;
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
- if (ret)
+ if (ret) {
+ of_node_put(child_node);
goto error;
+ }
}
i++;
}
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index b0d30c35c390..b3d7d6d8d654 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -822,8 +822,15 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
- cmd_error || data_error)
+ cmd_error || data_error) {
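+ /* Bump the matching CQE error counters before handling the error IRQ */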
+ if (status & CQHCI_IS_RED)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
+ if (status & CQHCI_IS_GCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
+ if (status & CQHCI_IS_ICCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
cqhci_error_irq(mmc, status, cmd_error, data_error);
+ }
if (status & CQHCI_IS_TCC) {
/* read TCN and complete the request */
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index ca5be4445ae0..9f20ac524c8b 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -670,7 +670,9 @@ static int dw_mci_exynos_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index e9437ef8ef19..6f22fe054087 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -179,7 +179,9 @@ static int dw_mci_hi3798cv200_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->drive_clk);
clk_disable_unprepare(priv->sample_clk);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct of_device_id dw_mci_hi3798cv200_match[] = {
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index f825487aa739..2a99f15f527f 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -377,7 +377,9 @@ static int dw_mci_rockchip_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 2f08d442e557..fc462995cf94 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1172,8 +1172,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
ret = device_reset_optional(&pdev->dev);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ goto free_host;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 01159eaf8694..012aa85489d8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -762,7 +762,7 @@ int mmci_dmae_setup(struct mmci_host *host)
/*
* If only an RX channel is specified, the driver will
- * attempt to use it bidirectionally, however if it is
+ * attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
if (dmae->rx_channel && !dmae->tx_channel)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 9da4489dc345..69d78604d1fc 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2015 MediaTek Inc.
+ * Copyright (c) 2014-2015, 2022 MediaTek Inc.
* Author: Chaotian.Jing <chaotian.jing@mediatek.com>
*/
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -440,8 +441,10 @@ struct msdc_host {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
+ struct pinctrl_state *pins_eint;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ int eint_irq; /* interrupt from sdio device for waking up system */
struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
@@ -1521,17 +1524,46 @@ static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
- unsigned long flags;
struct msdc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret;
spin_lock_irqsave(&host->lock, flags);
__msdc_enable_sdio_irq(host, enb);
spin_unlock_irqrestore(&host->lock, flags);
- if (enb)
- pm_runtime_get_noresume(host->dev);
- else
- pm_runtime_put_noidle(host->dev);
+ if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
+ if (enb) {
+ /*
+ * In dev_pm_set_dedicated_wake_irq_reverse(), eint pin will be set to
+ * GPIO mode. We need to restore it to SDIO DAT1 mode after that.
+ * Since the current pinstate is pins_uhs, to ensure the pinctrl
+ * select takes effect, we switch to pins_eint first.
+ */
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);
+
+ if (ret) {
+ dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
+ }
+
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ } else {
+ dev_pm_clear_wake_irq(host->dev);
+ }
+ } else {
+ if (enb) {
+ /* Ensure host->pins_eint is NULL */
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ pm_runtime_put_noidle(host->dev);
+ }
+ }
}
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
@@ -2319,7 +2351,7 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
else
val = readl(host->base + PAD_DS_TUNE);
- dev_info(host->dev, "Fianl PAD_DS_TUNE: 0x%x\n", val);
+ dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val);
return 0;
@@ -2414,6 +2446,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2635,6 +2670,20 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ /* Support for SDIO eint irq? */
+ if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
+ host->eint_irq = platform_get_irq_byname(pdev, "sdio_wakeup");
+ if (host->eint_irq > 0) {
+ host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
+ if (IS_ERR(host->pins_eint)) {
+ dev_err(&pdev->dev, "Cannot find pinctrl eint!\n");
+ host->pins_eint = NULL;
+ } else {
+ device_init_wakeup(&pdev->dev, true);
+ }
+ }
+ }
+
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
@@ -2849,6 +2898,15 @@ static int __maybe_unused msdc_runtime_suspend(struct device *dev)
struct msdc_host *host = mmc_priv(mmc);
msdc_save_reg(host);
+
+ if (sdio_irq_claimed(mmc)) {
+ if (host->pins_eint) {
+ disable_irq(host->irq);
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ }
+
+ __msdc_enable_sdio_irq(host, 0);
+ }
msdc_gate_clock(host);
return 0;
}
@@ -2864,25 +2922,47 @@ static int __maybe_unused msdc_runtime_resume(struct device *dev)
return ret;
msdc_restore_reg(host);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint) {
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ enable_irq(host->irq);
+ }
return 0;
}
static int __maybe_unused msdc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
}
+ /*
+ * Bump the runtime PM usage counter, otherwise dev->power.needs_force_resume
+ * will not be set and pm_runtime_force_resume() will return without resuming.
+ */
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_get_noresume(dev);
+
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused msdc_resume(struct device *dev)
{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_put_noidle(dev);
+
return pm_runtime_force_resume(dev);
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index de04b5afef2e..2cf0413407ea 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -923,7 +923,7 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
* One way to prevent this is to only allow 1-bit transfers.
*/
- if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
+ if (is_imx31_mmc(mxcmci) && mmc_card_sdio(card))
host->caps &= ~MMC_CAP_4_BIT_DATA;
else
host->caps |= MMC_CAP_4_BIT_DATA;
@@ -1025,7 +1025,7 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- host->devtype = (enum mxcmci_type)of_device_get_match_data(&pdev->dev);
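+ /* Cast via uintptr_t to avoid a pointer-to-enum conversion warning */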
+ host->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
/* adjust max_segs after devtype detection */
if (!is_mpc512x_mmc(host))
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0db9490dc659..e4003f6058eb 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 1a1e3e020a8c..c4abfee1ebae 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -43,6 +43,7 @@ struct renesas_sdhi_quirks {
bool hs400_4taps;
bool fixed_addr_mode;
bool dma_one_rx_only;
+ bool manual_tap_correction;
u32 hs400_bad_taps;
const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
};
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 4404ca1f98d8..6edbf5c161ab 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -49,9 +49,6 @@
#define HOST_MODE_GEN3_32BIT (HOST_MODE_GEN3_WMODE | HOST_MODE_GEN3_BUSWIDTH)
#define HOST_MODE_GEN3_64BIT 0
-#define CTL_SDIF_MODE 0xe6
-#define SDIF_MODE_HS400 BIT(0)
-
#define SDHI_VER_GEN2_SDR50 0x490c
#define SDHI_VER_RZ_A1 0x820b
/* very old datasheets said 0x490c for SDR104, too. They are wrong! */
@@ -383,8 +380,7 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
priv->scc_tappos_hs400);
- /* Gen3 can't do automatic tap correction with HS400, so disable it */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC)
+ if (priv->quirks && priv->quirks->manual_tap_correction)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
@@ -562,23 +558,25 @@ static void renesas_sdhi_scc_reset(struct tmio_mmc_host *host, struct renesas_sd
}
/* only populated for TMIO_MMC_MIN_RCAR2 */
-static void renesas_sdhi_reset(struct tmio_mmc_host *host)
+static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
{
struct renesas_sdhi *priv = host_to_priv(host);
int ret;
u16 val;
- if (priv->rstc) {
- reset_control_reset(priv->rstc);
- /* Unknown why but without polling reset status, it will hang */
- read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
- false, priv->rstc);
- /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
- sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
- priv->needs_adjust_hs400 = false;
- renesas_sdhi_set_clock(host, host->clk_cache);
- } else if (priv->scc_ctl) {
- renesas_sdhi_scc_reset(host, priv);
+ if (!preserve) {
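+ /* A "preserve" reset keeps clock and tuning state; skip the hard reset path */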
+ if (priv->rstc) {
+ reset_control_reset(priv->rstc);
+ /* Unknown why but without polling reset status, it will hang */
+ read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
+ false, priv->rstc);
+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ priv->needs_adjust_hs400 = false;
+ renesas_sdhi_set_clock(host, host->clk_cache);
+ } else if (priv->scc_ctl) {
+ renesas_sdhi_scc_reset(host, priv);
+ }
}
if (sd_ctrl_read16(host, CTL_VERSION) >= SDHI_VER_GEN3_SD) {
@@ -719,7 +717,7 @@ static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/* Change TAP position according to correction status */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC &&
+ if (priv->quirks && priv->quirks->manual_tap_correction &&
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
/*
@@ -938,6 +936,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk_cd))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cd), "cannot get cd clock");
+ priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return PTR_ERR(priv->rstc);
+
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
@@ -1030,10 +1032,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
- priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
- if (IS_ERR(priv->rstc))
- return PTR_ERR(priv->rstc);
-
ver = sd_ctrl_read16(host, CTL_VERSION);
/* GEN2_SDR104 is first known SDHI to use 32bit block count */
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 3084b15ae2cb..42937596c4c4 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -170,6 +170,7 @@ static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
@@ -182,25 +183,30 @@ static const struct renesas_sdhi_quirks sdhi_quirks_fixed_addr = {
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
.hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a7796_es13_calib_table,
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a77965_calib_table,
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
.hs400_calib_table = r8a77990_calib_table,
+ .manual_tap_correction = true,
};
/*
@@ -268,6 +274,7 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
@@ -321,7 +328,7 @@ renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
}
/*
- * renesas_sdhi_internal_dmac_map() will be called with two difference
+ * renesas_sdhi_internal_dmac_map() will be called with two different
* sg pointers in two mmc_data by .pre_req(), but tmio host can have a single
* sg_ptr only. So, renesas_sdhi_internal_dmac_{un}map() should use a sg
* pointer in a mmc_data instead of host->sg_ptr.
@@ -355,7 +362,7 @@ renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
data->host_cookie = cookie;
- /* This DMAC cannot handle if buffer is not 128-bytes alignment */
+ /* This DMAC needs buffers to be 128-byte aligned */
if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
renesas_sdhi_internal_dmac_unmap(host, data, cookie);
return false;
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 8eb57de48e0c..aff36a933ebe 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -31,6 +31,8 @@
struct sdhci_brcmstb_priv {
void __iomem *cfg_regs;
unsigned int flags;
+ struct clk *base_clk;
+ u32 base_freq_hz;
};
struct brcmstb_match_priv {
@@ -250,9 +252,11 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
const struct of_device_id *match;
struct sdhci_brcmstb_priv *priv;
+ u32 actual_clock_mhz;
struct sdhci_host *host;
struct resource *iomem;
struct clk *clk;
+ struct clk *base_clk = NULL;
int res;
match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
@@ -330,6 +334,35 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ /* Change the base clock frequency if the DT property exists */
+ if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->base_freq_hz) != 0)
+ goto add_host;
+
+ base_clk = devm_clk_get_optional(&pdev->dev, "sdio_freq");
+ if (IS_ERR(base_clk)) {
+ dev_warn(&pdev->dev, "Clock for \"sdio_freq\" not found\n");
+ goto add_host;
+ }
+
+ res = clk_prepare_enable(base_clk);
+ if (res)
+ goto err;
+
+ /* set the base clock to the rate requested in the DT */
+ clk_set_rate(base_clk, priv->base_freq_hz);
+ actual_clock_mhz = clk_get_rate(base_clk) / 1000000;
+
+ host->caps &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ host->caps |= (actual_clock_mhz << SDHCI_CLOCK_BASE_SHIFT);
+ /* Disable presets because they are now incorrect */
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ dev_dbg(&pdev->dev, "Base Clock Frequency changed to %dMHz\n",
+ actual_clock_mhz);
+ priv->base_clk = base_clk;
+
+add_host:
res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
@@ -340,6 +373,7 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
err:
sdhci_pltfm_free(pdev);
err_clk:
+ clk_disable_unprepare(base_clk);
clk_disable_unprepare(clk);
return res;
}
@@ -351,11 +385,51 @@ static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_brcmstb_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
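+ /* base_clk may be NULL here; clk_disable_unprepare() treats that as a no-op */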
+ clk_disable_unprepare(priv->base_clk);
+ return sdhci_pltfm_suspend(dev);
+}
+
+static int sdhci_brcmstb_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = sdhci_pltfm_resume(dev);
+ if (!ret && priv->base_freq_hz) {
+ ret = clk_prepare_enable(priv->base_clk);
+ /*
+ * Note: using clk_get_rate() below as clk_get_rate()
+ * honors CLK_GET_RATE_NOCACHE attribute, but clk_set_rate()
+ * may do implicit get_rate() calls that do not honor
+ * CLK_GET_RATE_NOCACHE.
+ */
+ if (!ret &&
+ (clk_get_rate(priv->base_clk) != priv->base_freq_hz))
+ ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_brcmstb_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_brcmstb_suspend, sdhci_brcmstb_resume)
+};
+
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pltfm_pmops,
+ .pm = &sdhci_brcmstb_pmops,
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
.probe = sdhci_brcmstb_probe,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e395411fb6fd..dc2991422a87 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2435,33 +2435,12 @@ static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
};
static const struct of_device_id sdhci_msm_dt_match[] = {
- /* Following two entries are deprecated (kept only for backward compatibility) */
- {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
- /* Add entries for sdcc versions less than 5.0 here */
- {.compatible = "qcom,apq8084-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8226-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8916-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8953-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8974-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8992-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8994-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8996-sdhci", .data = &sdhci_msm_mci_var},
/*
- * Add entries for sdcc version 5.0 here. For SDCC version 5.0.0,
- * MCI registers are removed from SDCC interface and some registers
- * are moved to HC.
+ * Do not add new entries for variants that are compatible with the
+ * generic ones, unless they need SoC-specific handling.
*/
- {.compatible = "qcom,qcs404-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdx55-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdx65-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdm630-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm6125-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm6350-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm8150-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm8250-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sc7280-sdhci", .data = &sdhci_msm_v5_var},
- /* Add entries where soc specific handling is required, here */
+ {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
+ {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
{},
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 757801dfc308..3997cad1f793 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -1733,7 +1733,6 @@ err_pltfm_free:
static int sdhci_arasan_remove(struct platform_device *pdev)
{
- int ret;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
@@ -1747,11 +1746,11 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
sdhci_arasan_unregister_sdclk(&pdev->dev);
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(clk_ahb);
- return ret;
+ return 0;
}
static struct platform_driver sdhci_arasan_driver = {
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 10fb4cb2c731..cd0134580a90 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -100,8 +100,13 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- if (timing == MMC_TIMING_MMC_DDR52)
- sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ u8 mc1r;
+
+ if (timing == MMC_TIMING_MMC_DDR52) {
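+ /* Read-modify-write MC1R so the other mode bits are preserved */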
+ mc1r = sdhci_readb(host, SDMMC_MC1R);
+ mc1r |= SDMMC_MC1R_DDR;
+ sdhci_writeb(host, mc1r, SDMMC_MC1R);
+ }
sdhci_set_uhs_signaling(host, timing);
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index bac874ab0b33..a7343d4bc50e 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include "sdhci-pltfm.h"
@@ -30,6 +31,7 @@
/* Offset inside the vendor area 1 */
#define DWCMSHC_HOST_CTRL3 0x8
#define DWCMSHC_EMMC_CONTROL 0x2c
+#define DWCMSHC_CARD_IS_EMMC BIT(0)
#define DWCMSHC_ENHANCED_STROBE BIT(8)
#define DWCMSHC_EMMC_ATCTRL 0x40
@@ -38,7 +40,7 @@
#define DWCMSHC_EMMC_DLL_RXCLK 0x804
#define DWCMSHC_EMMC_DLL_TXCLK 0x808
#define DWCMSHC_EMMC_DLL_STRBIN 0x80c
-#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DECMSHC_EMMC_DLL_CMDOUT 0x810
#define DWCMSHC_EMMC_DLL_STATUS0 0x840
#define DWCMSHC_EMMC_DLL_START BIT(0)
#define DWCMSHC_EMMC_DLL_LOCKED BIT(8)
@@ -47,22 +49,39 @@
#define DWCMSHC_EMMC_DLL_START_POINT 16
#define DWCMSHC_EMMC_DLL_INC 8
#define DWCMSHC_EMMC_DLL_DLYENA BIT(27)
-#define DLL_TXCLK_TAPNUM_DEFAULT 0x8
-#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
+#define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
+#define DLL_STRBIN_DELAY_NUM_OFFSET 16
+#define DLL_STRBIN_DELAY_NUM_DEFAULT 0x16
#define DLL_RXCLK_NO_INVERTER 1
#define DLL_RXCLK_INVERTER 0
+#define DLL_CMDOUT_TAPNUM_90_DEGREES 0x8
+#define DLL_CMDOUT_TAPNUM_FROM_SW BIT(24)
+#define DLL_CMDOUT_SRC_CLK_NEG BIT(28)
+#define DLL_CMDOUT_EN_SRC_CLK_NEG BIT(29)
+
#define DLL_LOCK_WO_TMOUT(x) \
((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
-#define RK3568_MAX_CLKS 3
+#define RK35xx_MAX_CLKS 3
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
-struct rk3568_priv {
+enum dwcmshc_rk_type {
+ DWCMSHC_RK3568,
+ DWCMSHC_RK3588,
+};
+
+struct rk35xx_priv {
/* Rockchip specified optional clocks */
- struct clk_bulk_data rockchip_clks[RK3568_MAX_CLKS];
+ struct clk_bulk_data rockchip_clks[RK35xx_MAX_CLKS];
+ struct reset_control *reset;
+ enum dwcmshc_rk_type devtype;
u8 txclk_tapnum;
};
@@ -131,7 +150,9 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- u16 ctrl_2;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 ctrl, ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
@@ -149,8 +170,15 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
else if ((timing == MMC_TIMING_UHS_DDR50) ||
(timing == MMC_TIMING_MMC_DDR52))
ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
- else if (timing == MMC_TIMING_MMC_HS400)
+ else if (timing == MMC_TIMING_MMC_HS400) {
+ /* set CARD_IS_EMMC bit to enable Data Strobe for HS400 */
+ ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ ctrl |= DWCMSHC_CARD_IS_EMMC;
+ sdhci_writew(host, ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+
ctrl_2 |= DWCMSHC_CTRL_HS400;
+ }
+
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
@@ -176,24 +204,18 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *priv = dwc_priv->priv;
+ struct rk35xx_priv *priv = dwc_priv->priv;
u8 txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
u32 extra, reg;
int err;
host->mmc->actual_clock = 0;
- /*
- * DO NOT TOUCH THIS SETTING. RX clk inverter unit is enabled
- * by default, but it shouldn't be enabled. We should anyway
- * disable it before issuing any cmds.
- */
- extra = DWCMSHC_EMMC_DLL_DLYENA |
- DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
- sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
-
- if (clock == 0)
+ if (clock == 0) {
+ /* Disable the interface clock in the initial state. */
+ sdhci_set_clock(host, clock);
return;
+ }
/* Rockchip platform only support 375KHz for identify mode */
if (clock <= 400000)
@@ -211,9 +233,21 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
extra &= ~BIT(0);
sdhci_writel(host, extra, reg);
- if (clock <= 400000) {
- /* Disable DLL to reset sample clock */
+ if (clock <= 52000000) {
+ /* Disable DLL and reset both the sample and drive clocks */
sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_CTRL);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_RXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
+ sdhci_writel(host, 0, DECMSHC_EMMC_DLL_CMDOUT);
+ /*
+ * Before switching to hs400es mode, the driver enables enhanced
+ * strobe first, so the PHY's enhanced-strobe parameters must
+ * already be configured here.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_STRBIN_DELAY_NUM_SEL |
+ DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
return;
}
@@ -222,6 +256,15 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
udelay(1);
sdhci_writel(host, 0x0, DWCMSHC_EMMC_DLL_CTRL);
+ /*
+ * We shouldn't set DLL_RXCLK_NO_INVERTER for identify mode but
+ * we must set it in higher speed mode.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA;
+ if (priv->devtype == DWCMSHC_RK3568)
+ extra |= DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
+
/* Init DLL settings */
extra = 0x5 << DWCMSHC_EMMC_DLL_START_POINT |
0x2 << DWCMSHC_EMMC_DLL_INC |
@@ -244,8 +287,20 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
host->mmc->ios.timing == MMC_TIMING_MMC_HS400)
txclk_tapnum = priv->txclk_tapnum;
+ if ((priv->devtype == DWCMSHC_RK3588) && host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ txclk_tapnum = DLL_TXCLK_TAPNUM_90_DEGREES;
+
+ extra = DLL_CMDOUT_SRC_CLK_NEG |
+ DLL_CMDOUT_EN_SRC_CLK_NEG |
+ DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_CMDOUT_TAPNUM_90_DEGREES |
+ DLL_CMDOUT_TAPNUM_FROM_SW;
+ sdhci_writel(host, extra, DECMSHC_EMMC_DLL_CMDOUT);
+ }
+
extra = DWCMSHC_EMMC_DLL_DLYENA |
DLL_TXCLK_TAPNUM_FROM_SW |
+ DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL |
txclk_tapnum;
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
@@ -255,6 +310,21 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
}
+static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ struct rk35xx_priv *priv = dwc_priv->priv;
+
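+ /* On a full reset, pulse the optional external reset line before the SDHCI reset */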
+ if (mask & SDHCI_RESET_ALL && priv->reset) {
+ reset_control_assert(priv->reset);
+ udelay(1);
+ reset_control_deassert(priv->reset);
+ }
+
+ sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -264,12 +334,12 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
.adma_write_desc = dwcmshc_adma_write_desc,
};
-static const struct sdhci_ops sdhci_dwcmshc_rk3568_ops = {
+static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
.set_clock = dwcmshc_rk3568_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = dwcmshc_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
- .reset = sdhci_reset,
+ .reset = rk35xx_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
};
@@ -279,30 +349,46 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_rk3568_pdata = {
- .ops = &sdhci_dwcmshc_rk3568_ops,
+#ifdef CONFIG_ACPI
+static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+#endif
+
+static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
+ .ops = &sdhci_dwcmshc_rk35xx_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
-static int dwcmshc_rk3568_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
{
int err;
- struct rk3568_priv *priv = dwc_priv->priv;
+ struct rk35xx_priv *priv = dwc_priv->priv;
+
+ priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
+ if (IS_ERR(priv->reset)) {
+ err = PTR_ERR(priv->reset);
+ dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
+ return err;
+ }
priv->rockchip_clks[0].id = "axi";
priv->rockchip_clks[1].id = "block";
priv->rockchip_clks[2].id = "timer";
- err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK3568_MAX_CLKS,
+ err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK35xx_MAX_CLKS,
priv->rockchip_clks);
if (err) {
dev_err(mmc_dev(host->mmc), "failed to get clocks %d\n", err);
return err;
}
- err = clk_bulk_prepare_enable(RK3568_MAX_CLKS, priv->rockchip_clks);
+ err = clk_bulk_prepare_enable(RK35xx_MAX_CLKS, priv->rockchip_clks);
if (err) {
dev_err(mmc_dev(host->mmc), "failed to enable clocks %d\n", err);
return err;
@@ -321,10 +407,28 @@ static int dwcmshc_rk3568_init(struct sdhci_host *host, struct dwcmshc_priv *dwc
return 0;
}
+static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ /*
+ * High-speed bus modes are not supported at low clock speeds
+ * because the DLL cannot be used under those conditions.
+ */
+ if (host->mmc->f_max <= 52000000) {
+ dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
+ host->mmc->f_max);
+ host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
+ host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
+ }
+}
+
static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
{
+ .compatible = "rockchip,rk3588-dwcmshc",
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
+ },
+ {
.compatible = "rockchip,rk3568-dwcmshc",
- .data = &sdhci_dwcmshc_rk3568_pdata,
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
},
{
.compatible = "snps,dwcmshc-sdhci",
@@ -336,7 +440,10 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
- { .id = "MLNXBF30" },
+ {
+ .id = "MLNXBF30",
+ .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+ },
{}
};
#endif
@@ -347,12 +454,12 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct dwcmshc_priv *priv;
- struct rk3568_priv *rk_priv = NULL;
+ struct rk35xx_priv *rk_priv = NULL;
const struct sdhci_pltfm_data *pltfm_data;
int err;
u32 extra;
- pltfm_data = of_device_get_match_data(&pdev->dev);
+ pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
@@ -402,33 +509,47 @@ static int dwcmshc_probe(struct platform_device *pdev)
host->mmc_host_ops.request = dwcmshc_request;
host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
- if (pltfm_data == &sdhci_dwcmshc_rk3568_pdata) {
- rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk3568_priv), GFP_KERNEL);
+ if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) {
+ rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
if (!rk_priv) {
err = -ENOMEM;
goto err_clk;
}
+ if (of_device_is_compatible(pdev->dev.of_node, "rockchip,rk3588-dwcmshc"))
+ rk_priv->devtype = DWCMSHC_RK3588;
+ else
+ rk_priv->devtype = DWCMSHC_RK3568;
+
priv->priv = rk_priv;
- err = dwcmshc_rk3568_init(host, priv);
+ err = dwcmshc_rk35xx_init(host, priv);
if (err)
goto err_clk;
}
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_clk;
+ if (rk_priv)
+ dwcmshc_rk35xx_postinit(host, priv);
+
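+	/*
+	 * Registration is split so the postinit hook above can trim
+	 * capabilities populated by sdhci_setup_host() before the host
+	 * goes live in __sdhci_add_host().
+	 */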
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_setup_host;
+
return 0;
+err_setup_host:
+ sdhci_cleanup_host(host);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
free_pltfm:
sdhci_pltfm_free(pdev);
@@ -440,14 +561,14 @@ static int dwcmshc_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
sdhci_remove_host(host, 0);
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
sdhci_pltfm_free(pdev);
@@ -460,7 +581,7 @@ static int dwcmshc_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
int ret;
ret = sdhci_suspend_host(host);
@@ -472,7 +593,7 @@ static int dwcmshc_suspend(struct device *dev)
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
return ret;
@@ -483,7 +604,7 @@ static int dwcmshc_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
@@ -497,7 +618,7 @@ static int dwcmshc_resume(struct device *dev)
}
if (rk_priv) {
- ret = clk_bulk_prepare_enable(RK3568_MAX_CLKS,
+ ret = clk_bulk_prepare_enable(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
if (ret)
return ret;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d9dc41143bb3..e0266638381d 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -904,6 +904,7 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
+ of_node_put(scfg_node);
if (scfg_base) {
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_VSELVAL;
@@ -1418,7 +1419,7 @@ static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct device_node *np;
+ struct device_node *np, *tp;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
int ret;
@@ -1463,7 +1464,9 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+ tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
+ if (tp) {
+ of_node_put(tp);
host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index f13c08db3da5..4d509f656188 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -95,6 +95,9 @@
#define PCIE_GLI_9763E_SCR 0x8E0
#define GLI_9763E_SCR_AXI_REQ BIT(9)
+#define PCIE_GLI_9763E_CFG 0x8A0
+#define GLI_9763E_CFG_LPSN_DIS BIT(12)
+
#define PCIE_GLI_9763E_CFG2 0x8A4
#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
#define GLI_9763E_CFG2_L1DLY_MID 0x54
@@ -963,12 +966,40 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
}
#ifdef CONFIG_PM
+static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+
+ if (enable)
+ value &= ~GLI_9763E_CFG_LPSN_DIS;
+ else
+ value |= GLI_9763E_CFG_LPSN_DIS;
+
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+}
+
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
struct sdhci_host *host = slot->host;
u16 clock;
+ /* Enable LPM negotiation to allow entering L1 state */
+ gl9763e_set_low_power_negotiation(slot, true);
+
clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clock &= ~(SDHCI_CLOCK_PLL_EN | SDHCI_CLOCK_CARD_EN);
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
@@ -1002,6 +1033,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
clock |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+ /* Disable LPM negotiation to avoid entering L1 state. */
+ gl9763e_set_low_power_negotiation(slot, false);
+
return 0;
}
#endif
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index d41582c21aa3..6415916fbd91 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -440,15 +440,14 @@ static int sdhci_st_remove(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
struct reset_control *rstc = pdata->rstc;
- int ret;
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(pdata->icnclk);
reset_control_assert(rstc);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 22152029e14c..7689ffec5ad1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -224,6 +224,7 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
if (timedout) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -1716,6 +1717,7 @@ static bool sdhci_send_command_retry(struct sdhci_host *host,
if (!timeout--) {
pr_err("%s: Controller never released inhibit bit(s).\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
cmd->error = -EIO;
return false;
@@ -1965,6 +1967,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -1987,6 +1990,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: PLL clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -3161,6 +3165,7 @@ static void sdhci_timeout_timer(struct timer_list *t)
if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
host->cmd->error = -ETIMEDOUT;
@@ -3183,6 +3188,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
(host->cmd && sdhci_data_line_cmd(host->cmd))) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
if (host->data) {
@@ -3234,17 +3240,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
return;
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
- if (intmask & SDHCI_INT_TIMEOUT)
+ if (intmask & SDHCI_INT_TIMEOUT) {
host->cmd->error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else {
host->cmd->error = -EILSEQ;
-
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ }
/* Treat data command CRC error the same as data CRC error */
if (host->cmd->data &&
(intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
@@ -3266,6 +3276,8 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
-ETIMEDOUT :
-EILSEQ;
+ sdhci_err_stats_inc(host, AUTO_CMD);
+
if (sdhci_auto_cmd23(host, mrq)) {
mrq->sbc->error = err;
__sdhci_finish_mrq(host, mrq);
@@ -3342,6 +3354,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
__sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
@@ -3370,23 +3383,30 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
- if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data->error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_DATA_END_BIT)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_DATA_END_BIT) {
host->data->error = -EILSEQ;
- else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if ((intmask & SDHCI_INT_DATA_CRC) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
- != MMC_BUS_TEST_R)
+ != MMC_BUS_TEST_R) {
host->data->error = -EILSEQ;
- else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
intmask);
sdhci_adma_show_error(host);
+ sdhci_err_stats_inc(host, ADMA);
host->data->error = -EIO;
if (host->ops->adma_workaround)
host->ops->adma_workaround(host, intmask);
@@ -3584,6 +3604,7 @@ out:
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
@@ -3905,20 +3926,27 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (!host->cqe_on)
return false;
- if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
+ if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- else if (intmask & SDHCI_INT_TIMEOUT)
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ } else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else
*cmd_error = 0;
- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- else if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_ADMA_ERROR)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
*data_error = -EIO;
- else
+ sdhci_err_stats_inc(host, ADMA);
+ } else
*data_error = 0;
/* Clear selected interrupts. */
@@ -3934,6 +3962,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
sdhci_writel(host, intmask, SDHCI_INT_STATUS);
pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d7929d725730..95a08f09df30 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -356,6 +356,9 @@ struct sdhci_adma2_64_desc {
*/
#define MMC_CMD_TRANSFER_TIME (10 * NSEC_PER_MSEC) /* max 10 ms */
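+/*
+ * The short name is pasted onto the MMC core's MMC_ERR_ prefix, so e.g.
+ * sdhci_err_stats_inc(host, CMD_CRC) expands to
+ * mmc_debugfs_err_stats_inc((host)->mmc, MMC_ERR_CMD_CRC).
+ */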
+#define sdhci_err_stats_inc(host, err_name) \
+ mmc_debugfs_err_stats_inc((host)->mmc, MMC_ERR_##err_name)
+
enum sdhci_cookie {
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b55a29c53d9c..53a2ad9a24b8 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -75,7 +75,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
tmio_mmc_clk_start(host);
}
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
+static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
usleep_range(10000, 11000);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index e754bb3f5c32..501613c74406 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -42,6 +42,7 @@
#define CTL_DMA_ENABLE 0xd8
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
+#define CTL_SDIF_MODE 0xe6 /* only known on R-Car 2+ */
/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
#define TMIO_STOP_STP BIT(0)
@@ -98,6 +99,9 @@
/* Definitions for values the CTL_DMA_ENABLE register can take */
#define DMA_ENABLE_DMASDRW BIT(1)
+/* Definitions for values the CTL_SDIF_MODE register can take */
+#define SDIF_MODE_HS400 BIT(0) /* only known on R-Car 2+ */
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
@@ -181,7 +185,7 @@ struct tmio_mmc_host {
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- void (*reset)(struct tmio_mmc_host *host);
+ void (*reset)(struct tmio_mmc_host *host, bool preserve);
bool (*check_retune)(struct tmio_mmc_host *host, struct mmc_request *mrq);
void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index a5850d83908b..437048bb8027 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -179,8 +179,17 @@ static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
+static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
+ u16 card_opt, clk_ctrl, sdif_mode;
+
+ if (preserve) {
+ card_opt = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
+ clk_ctrl = sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sdif_mode = sd_ctrl_read16(host, CTL_SDIF_MODE);
+ }
+
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
usleep_range(10000, 11000);
@@ -190,7 +199,7 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
tmio_mmc_abort_dma(host);
if (host->reset)
- host->reset(host);
+ host->reset(host, preserve);
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
@@ -206,6 +215,13 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
}
+ if (preserve) {
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, card_opt);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk_ctrl);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sd_ctrl_write16(host, CTL_SDIF_MODE, sdif_mode);
+ }
+
if (host->mmc->card)
mmc_retune_needed(host->mmc);
}
@@ -248,7 +264,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&host->lock, flags);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, true);
/* Ready for new calls */
host->mrq = NULL;
@@ -961,7 +977,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, false);
host->set_clock(host, 0);
break;
@@ -1189,7 +1205,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->sdcard_irq_mask_all = TMIO_MASK_ALL;
_host->set_clock(_host, 0);
- tmio_mmc_reset(_host);
+ tmio_mmc_reset(_host, false);
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
@@ -1285,7 +1301,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_clk_enable(host);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, false);
if (host->clk_cache)
host->set_clock(host, host->clk_cache);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 134e27328597..25bad4318305 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -112,6 +112,13 @@ static const struct of_device_id dataflash_dt_ids[] = {
MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
#endif
+static const struct spi_device_id dataflash_spi_ids[] = {
+ { .name = "at45", },
+ { .name = "dataflash", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, dataflash_spi_ids);
+
/* ......................................................................... */
/*
@@ -936,6 +943,7 @@ static struct spi_driver dataflash_driver = {
.probe = dataflash_probe,
.remove = dataflash_remove,
+ .id_table = dataflash_spi_ids,
/* FIXME: investigate suspend and resume... */
};
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 6950a8764815..36e060386e59 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -270,7 +270,9 @@ static int powernv_flash_release(struct platform_device *pdev)
struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
/* All resources should be freed automatically */
- return mtd_device_unregister(&(data->mtd));
+ WARN_ON(mtd_device_unregister(&data->mtd));
+
+ return 0;
}
static const struct of_device_id powernv_flash_match[] = {
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 24073518587f..f58742486d3d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -1045,13 +1045,9 @@ static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
- int ret, i;
+ int i;
dev = platform_get_drvdata(pdev);
- if (!dev) {
- dev_err(&pdev->dev, "dev is null\n");
- return -ENODEV;
- }
/* clean up for all nor flash */
for (i = 0; i < dev->num_flashes; i++) {
@@ -1060,9 +1056,7 @@ static int spear_smi_remove(struct platform_device *pdev)
continue;
/* clean up mtd stuff */
- ret = mtd_device_unregister(&flash->mtd);
- if (ret)
- dev_err(&pdev->dev, "error removing mtd\n");
+ WARN_ON(mtd_device_unregister(&flash->mtd));
}
clk_disable_unprepare(dev->clk);
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index d3377b10fc0f..54861d889c30 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2084,15 +2084,12 @@ static int stfsm_probe(struct platform_device *pdev)
* Configure READ/WRITE/ERASE sequences according to platform and
* device flags.
*/
- if (info->config) {
+ if (info->config)
ret = info->config(fsm);
- if (ret)
- goto err_clk_unprepare;
- } else {
+ else
ret = stfsm_prepare_rwe_seqs_default(fsm);
- if (ret)
- goto err_clk_unprepare;
- }
+ if (ret)
+ goto err_clk_unprepare;
fsm->mtd.name = info->name;
fsm->mtd.dev.parent = &pdev->dev;
@@ -2115,10 +2112,12 @@ static int stfsm_probe(struct platform_device *pdev)
(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
- return mtd_device_register(&fsm->mtd, NULL, 0);
-
+ ret = mtd_device_register(&fsm->mtd, NULL, 0);
+ if (ret) {
err_clk_unprepare:
- clk_disable_unprepare(fsm->clk);
+ clk_disable_unprepare(fsm->clk);
+ }
+
return ret;
}
@@ -2126,9 +2125,11 @@ static int stfsm_remove(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
+ WARN_ON(mtd_device_unregister(&fsm->mtd));
+
clk_disable_unprepare(fsm->clk);
- return mtd_device_unregister(&fsm->mtd);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index a3439b791eeb..a6161ce340d4 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -233,16 +233,16 @@ static int am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
- int ret;
- ret = hyperbus_unregister_device(&priv->hbdev);
+ hyperbus_unregister_device(&priv->hbdev);
+
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
- return ret;
+ return 0;
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 2f9fc4e17d53..4d8047d43e48 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -126,16 +126,12 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
}
EXPORT_SYMBOL_GPL(hyperbus_register_device);
-int hyperbus_unregister_device(struct hyperbus_device *hbdev)
+void hyperbus_unregister_device(struct hyperbus_device *hbdev)
{
- int ret = 0;
-
if (hbdev && hbdev->mtd) {
- ret = mtd_device_unregister(hbdev->mtd);
+ WARN_ON(mtd_device_unregister(hbdev->mtd));
map_destroy(hbdev->mtd);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index 6e08ec1d4f09..d00d30243403 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -134,7 +134,7 @@ static int rpcif_hb_probe(struct platform_device *pdev)
error = rpcif_hw_init(&hyperbus->rpc, true);
if (error)
- return error;
+ goto out_disable_rpm;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
@@ -145,19 +145,24 @@ static int rpcif_hb_probe(struct platform_device *pdev)
hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
error = hyperbus_register_device(&hyperbus->hbdev);
if (error)
- rpcif_disable_rpm(&hyperbus->rpc);
+ goto out_disable_rpm;
+
+ return 0;
+out_disable_rpm:
+ rpcif_disable_rpm(&hyperbus->rpc);
return error;
}
static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
- int error = hyperbus_unregister_device(&hyperbus->hbdev);
+
+ hyperbus_unregister_device(&hyperbus->hbdev);
rpcif_disable_rpm(&hyperbus->rpc);
- return error;
+ return 0;
}
static struct platform_driver rpcif_platform_driver = {
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 72f5c7b30079..367e2d906de0 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -478,7 +478,9 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
*/
static int lpddr2_nvm_remove(struct platform_device *pdev)
{
- return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
+ WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
+
+ return 0;
}
/* Initialize platform_driver data structure for lpddr2_nvm */
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 4f63b8430c71..85eca6a192e6 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -66,18 +66,12 @@ static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
- int i, err = 0;
+ int i;
info = platform_get_drvdata(dev);
- if (!info) {
- err = -EINVAL;
- goto out;
- }
if (info->cmtd) {
- err = mtd_device_unregister(info->cmtd);
- if (err)
- goto out;
+ WARN_ON(mtd_device_unregister(info->cmtd));
if (info->cmtd != info->mtds[0])
mtd_concat_destroy(info->cmtd);
@@ -92,10 +86,9 @@ static int physmap_flash_remove(struct platform_device *dev)
if (physmap_data && physmap_data->exit)
physmap_data->exit(dev);
-out:
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
- return err;
+ return 0;
}
static void physmap_set_vpp(struct map_info *map, int state)
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index ad7cd9cfaee0..a1b8b7b25f88 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -93,6 +93,7 @@ static int ap_flash_init(struct platform_device *pdev)
return -ENODEV;
}
ebi_base = of_iomap(ebi, 0);
+ of_node_put(ebi);
if (!ebi_base)
return -ENODEV;
@@ -207,6 +208,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
versatile_flashprot = (enum versatile_flashprot)devid->data;
rmap = syscon_node_to_regmap(sysnp);
+ of_node_put(sysnp);
if (IS_ERR(rmap))
return PTR_ERR(rmap);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index d0f9c4b0285c..05860288a7af 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -615,21 +615,24 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
if (!usr_oob)
req.ooblen = 0;
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
if (req.start + req.len > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
if (datbuf_len > 0) {
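+		/* erasesize-sized buffers can be large; let kvmalloc() fall back to vmalloc */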
- datbuf = kmalloc(datbuf_len, GFP_KERNEL);
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
if (!datbuf)
return -ENOMEM;
}
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
if (oobbuf_len > 0) {
- oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
if (!oobbuf) {
- kfree(datbuf);
+ kvfree(datbuf);
return -ENOMEM;
}
}
@@ -679,8 +682,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_oob += ops.oobretlen;
}
- kfree(datbuf);
- kfree(oobbuf);
+ kvfree(datbuf);
+ kvfree(oobbuf);
return ret;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9eb0680db312..a9b8be9f40dc 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -546,6 +546,68 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
return 0;
}
+static void mtd_check_of_node(struct mtd_info *mtd)
+{
+ struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
+ const char *pname, *prefix = "partition-";
+ int plen, mtd_name_len, offset, prefix_len;
+ struct mtd_info *parent;
+ bool found = false;
+
+ /* Check if MTD already has a device node */
+ if (dev_of_node(&mtd->dev))
+ return;
+
+ /* Check if a partitions node exists */
+ if (!mtd_is_partition(mtd))
+ return;
+ parent = mtd->parent;
+ parent_dn = dev_of_node(&parent->dev);
+ if (!parent_dn)
+ return;
+
+ partitions = of_get_child_by_name(parent_dn, "partitions");
+ if (!partitions)
+ goto exit_parent;
+
+ prefix_len = strlen(prefix);
+ mtd_name_len = strlen(mtd->name);
+
+ /* Search if a partition is defined with the same name */
+ for_each_child_of_node(partitions, mtd_dn) {
+ offset = 0;
+
+ /* Skip partitions with a missing or wrong prefix */
+ if (!of_node_name_prefix(mtd_dn, "partition-"))
+ continue;
+
+ /* The label has priority. Check that first. */
+ if (of_property_read_string(mtd_dn, "label", &pname)) {
+ of_property_read_string(mtd_dn, "name", &pname);
+ offset = prefix_len;
+ }
+
+ plen = strlen(pname) - offset;
+ if (plen == mtd_name_len &&
+ !strncmp(mtd->name, pname + offset, plen)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto exit_partitions;
+
+ /* Set of_node only for nvmem */
+ if (of_device_is_compatible(mtd_dn, "nvmem-cells"))
+ mtd_set_of_node(mtd, mtd_dn);
+
+exit_partitions:
+ of_node_put(partitions);
+exit_parent:
+ of_node_put(parent_dn);
+}
+
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
@@ -658,6 +720,7 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
+ mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error)
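
The partition matching in mtd_check_of_node() above walks "partition-"
prefixed children of a "partitions" node, prefers a "label" property over
the node name, and attaches the matching node only when it is
"nvmem-cells" compatible. A minimal sketch of the shape it expects (node
name, label and reg values are illustrative only):

	partitions {
		compatible = "fixed-partitions";
		#address-cells = <1>;
		#size-cells = <1>;

		partition-art {
			label = "art";	/* compared against mtd->name */
			compatible = "nvmem-cells";
			reg = <0x0 0x10000>;
		};
	};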
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 53bd10738418..296fb16c8dc3 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -347,17 +347,17 @@ static int anfc_select_target(struct nand_chip *chip, int target)
/* Update clock frequency */
if (nfc->cur_clk != anand->clk) {
- clk_disable_unprepare(nfc->controller_clk);
- ret = clk_set_rate(nfc->controller_clk, anand->clk);
+ clk_disable_unprepare(nfc->bus_clk);
+ ret = clk_set_rate(nfc->bus_clk, anand->clk);
if (ret) {
dev_err(nfc->dev, "Failed to change clock rate\n");
return ret;
}
- ret = clk_prepare_enable(nfc->controller_clk);
+ ret = clk_prepare_enable(nfc->bus_clk);
if (ret) {
dev_err(nfc->dev,
- "Failed to re-enable the controller clock\n");
+ "Failed to re-enable the bus clock\n");
return ret;
}
@@ -1043,7 +1043,13 @@ static int anfc_setup_interface(struct nand_chip *chip, int target,
DQS_BUFF_SEL_OUT(dqs_mode);
}
- anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ if (nand_interface_is_sdr(conf)) {
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ } else {
+ /* ONFI timings are defined in picoseconds */
+ anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
+ conf->timings.nvddr.tCK_min);
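+		/*
+		 * e.g. an interface reporting tCK_min = 20,000 ps would
+		 * yield 10^12 / 20,000 = 50 MHz (illustrative figure).
+		 */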
+ }
/*
* Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 6ef14442c71a..c9ac3baf68c0 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2629,7 +2629,9 @@ static int atmel_nand_controller_remove(struct platform_device *pdev)
{
struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
- return nc->caps->ops->remove(nc);
+ WARN_ON(nc->caps->ops->remove(nc));
+
+ return 0;
}
static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 9dbf031716a6..af119e376352 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -679,8 +679,10 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_set_master(pdev);
cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
- if (!cafe)
- return -ENOMEM;
+ if (!cafe) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
mtd = nand_to_mtd(&cafe->nand);
mtd->dev.parent = &pdev->dev;
@@ -801,6 +803,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
kfree(cafe);
+ out_disable_device:
+ pci_disable_device(pdev);
out:
return err;
}
@@ -822,6 +826,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
+ pci_disable_device(pdev);
}
static const struct pci_device_id cafe_nand_tbl[] = {
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index ac3be92872d0..829b76b303aa 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1293,26 +1293,20 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
-static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
- int ret;
while (!list_empty(&nfc->chips)) {
meson_chip = list_first_entry(&nfc->chips,
struct meson_nfc_nand_chip, node);
mtd = nand_to_mtd(&meson_chip->nand);
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
+ WARN_ON(mtd_device_unregister(mtd));
- meson_nfc_free_buffer(&meson_chip->nand);
nand_cleanup(&meson_chip->nand);
list_del(&meson_chip->node);
}
-
- return 0;
}
static int meson_nfc_nand_chips_init(struct device *dev,
@@ -1445,16 +1439,11 @@ err_clk:
static int meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
- int ret;
- ret = meson_nfc_nand_chip_cleanup(nfc);
- if (ret)
- return ret;
+ meson_nfc_nand_chip_cleanup(nfc);
meson_nfc_disable_clk(nfc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 58c32a11792e..4a9f2b6c772d 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -2278,16 +2278,14 @@ static int omap_nand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct omap_nand_info *info = mtd_to_omap(mtd);
- int ret;
rawnand_sw_bch_cleanup(nand_chip);
if (info->dma)
dma_release_channel(info->dma);
- ret = mtd_device_unregister(mtd);
- WARN_ON(ret);
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(nand_chip);
- return ret;
+ return 0;
}
/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 048b255faa76..8f80019a9f01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -80,8 +80,10 @@
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31
@@ -102,6 +104,7 @@
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
#define ECC_FORCE_CLK_OPEN 30
/* NAND_DEV_CMD1 bits */
@@ -238,6 +241,9 @@ nandc_set_reg(chip, reg, \
* @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
* @bam_ce_pos - the index in bam_ce which is available for next sgl
* @bam_ce_start - the index in bam_ce which marks the start position ce
* for current sgl. It will be used for size calculation
@@ -250,14 +256,14 @@ nandc_set_reg(chip, reg, \
* @rx_sgl_start - start index in data sgl for rx.
* @wait_second_completion - wait for second DMA desc completion before making
* the NAND transfer completion.
- * @txn_done - completion for NAND transfer.
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos;
@@ -267,25 +273,23 @@ struct bam_transaction {
u32 rx_sgl_pos;
u32 rx_sgl_start;
bool wait_second_completion;
- struct completion txn_done;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
* This data type corresponds to the nand dma descriptor
+ * @dma_desc - low level DMA engine descriptor
* @list - list for desc_info
- * @dir - DMA transfer direction
+ *
* @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
* ADM
* @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
* @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dma_desc - low level DMA engine descriptor
+ * @dir - DMA transfer direction
*/
struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
struct list_head node;
- enum dma_data_direction dir;
union {
struct scatterlist adm_sgl;
struct {
@@ -293,7 +297,7 @@ struct desc_info {
int sgl_cnt;
};
};
- struct dma_async_tx_descriptor *dma_desc;
+ enum dma_data_direction dir;
};
/*
@@ -337,52 +341,64 @@ struct nandc_regs {
/*
* NAND controller data struct
*
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
* @dev: parent device
+ *
* @base: MMIO base
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
+ *
* @core_clk: controller clock
* @aon_clk: another controller clock
*
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
* @chan: dma channel
* @cmd_crci: ADM DMA CRCI for command flow control
* @data_crci: ADM DMA CRCI for data flow control
+ *
* @desc_list: DMA descriptor list (list of desc_infos)
*
* @data_buffer: our local DMA buffer for page read/writes,
* used when we can't use the buffer provided
* by upper layers directly
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
* @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @reg_read_dma: contains dma address for register read buffer
- * @reg_read_pos: marker for data read in reg_read_buf
*
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- * @cmd1/vld: some fixed controller register values
- * @props: properties of current NAND controller,
- * initialized via DT match data
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
* @max_cwperpage: maximum QPIC codewords required. calculated
* from all connected NAND devices pagesize
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
*/
struct qcom_nand_controller {
- struct nand_controller controller;
- struct list_head host_list;
-
struct device *dev;
void __iomem *base;
- phys_addr_t base_phys;
- dma_addr_t base_dma;
struct clk *core_clk;
struct clk *aon_clk;
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller controller;
+ struct list_head host_list;
+
union {
/* will be used only by QPIC for BAM DMA */
struct {
@@ -400,64 +416,89 @@ struct qcom_nand_controller {
};
struct list_head desc_list;
- struct bam_transaction *bam_txn;
u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
int buf_size;
int buf_count;
int buf_start;
unsigned int max_cwperpage;
- __le32 *reg_read_buf;
- dma_addr_t reg_read_dma;
int reg_read_pos;
- struct nandc_regs *regs;
-
u32 cmd1, vld;
- const struct qcom_nandc_props *props;
+};
+
+/*
+ * NAND special boot partitions
+ *
+ * @page_offset: offset of the partition where spare data is not protected
+ * by ECC (value in pages)
+ * @page_size: size of the partition where spare data is not protected
+ * by ECC (value in pages)
+ */
+struct qcom_nand_boot_partition {
+ u32 page_offset;
+ u32 page_size;
};
/*
* NAND chip structure
*
+ * @boot_partitions: array of boot partitions where offset and size of the
+ * boot partitions are stored
+ *
* @chip: base NAND chip structure
* @node: list node to add itself to host_list in
* qcom_nand_controller
*
+ * @nr_boot_partitions: count of the boot partitions where spare data is not
+ * protected by ECC
+ *
* @cs: chip select value for this chip
* @cw_size: the number of bytes in a single step/codeword
* of a page, consisting of all data, ecc, spare
* and reserved bytes
* @cw_data: the number of bytes within a codeword protected
* by ECC
- * @use_ecc: request the controller to use ECC for the
- * upcoming read/write
- * @bch_enabled: flag to tell whether BCH ECC mode is used
* @ecc_bytes_hw: ECC bytes used by controller hardware for this
* chip
- * @status: value to be returned if NAND_CMD_STATUS command
- * is executed
+ *
* @last_command: keeps track of last command on this chip. used
* for reading correct status
*
* @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
* ecc/non-ecc mode for the current nand flash
* device
+ *
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @codeword_fixup: keep track of the current layout used by
+ * the driver for read/write operation.
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
*/
struct qcom_nand_host {
+ struct qcom_nand_boot_partition *boot_partitions;
+
struct nand_chip chip;
struct list_head node;
+ int nr_boot_partitions;
+
int cs;
int cw_size;
int cw_data;
- bool use_ecc;
- bool bch_enabled;
int ecc_bytes_hw;
int spare_bytes;
int bbm_size;
- u8 status;
+
int last_command;
u32 cfg0, cfg1;
@@ -466,23 +507,30 @@ struct qcom_nand_host {
u32 ecc_bch_cfg;
u32 clrflashstatus;
u32 clrreadstatus;
+
+ u8 status;
+ bool codeword_fixup;
+ bool use_ecc;
+ bool bch_enabled;
};
/*
* This data type corresponds to the NAND controller properties which varies
* among different NAND controllers.
* @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
* @is_bam - whether NAND controller is using BAM
* @is_qpic - whether NAND CTRL is part of qpic IP
* @qpic_v2 - flag to indicate QPIC IP version 2
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
*/
struct qcom_nandc_props {
u32 ecc_modes;
+ u32 dev_cmd_reg_start;
bool is_bam;
bool is_qpic;
bool qpic_v2;
- u32 dev_cmd_reg_start;
+ bool use_codeword_fixup;
};
/* Frees the BAM transaction memory */
@@ -1701,7 +1749,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) * 4);
oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
@@ -1782,7 +1830,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
}
for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) * 4);
oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
} else {
@@ -1940,7 +1988,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2037,6 +2085,69 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
return ret;
}
+static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
+{
+ struct qcom_nand_boot_partition *boot_partition;
+ u32 start, end;
+ int i;
+
+ /*
+ * Since most accesses will be to non-boot partitions like rootfs,
+ * optimize the page check by:
+ *
+ * 1. Checking if the page lies after the last boot partition.
+ * 2. Checking from the boot partition end.
+ */
+
+ /* First check the last boot partition */
+ boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ /* Page is after the last boot partition end. This is NOT a boot partition */
+ if (page > end)
+ return false;
+
+ /* Actually check if it's a boot partition */
+ if (page < end && page >= start)
+ return true;
+
+ /* Check the other boot partitions starting from the second-last partition */
+ for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
+ boot_partition = &host->boot_partitions[i];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ if (page < end && page >= start)
+ return true;
+ }
+
+ return false;
+}
+
+static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
+{
+ bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
+
+ /* Skip conf write if we are already in the correct mode */
+ if (codeword_fixup == host->codeword_fixup)
+ return;
+
+ host->codeword_fixup = codeword_fixup;
+
+ host->cw_data = codeword_fixup ? 512 : 516;
+ host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
+ host->bbm_size - host->cw_data;
+
+ host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
+ host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
+ host->cw_data << UD_SIZE_BYTES;
+
+ host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
+ host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
+ host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+}
+
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
@@ -2045,6 +2156,9 @@ static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
u8 *data_buf, *oob_buf = NULL;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -2064,6 +2178,9 @@ static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int cw, ret;
u8 *data_buf = buf, *oob_buf = chip->oob_poi;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
for (cw = 0; cw < ecc->steps; cw++) {
ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
page, cw);
@@ -2084,6 +2201,9 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2104,6 +2224,9 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
@@ -2119,7 +2242,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2176,6 +2299,9 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2194,7 +2320,7 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) << 2);
oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
@@ -2254,6 +2380,9 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
int data_size, oob_size;
int ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
host->use_ecc = true;
clear_bam_transaction(nandc);
@@ -2915,6 +3044,74 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
+static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_boot_partition *boot_partition;
+ struct device *dev = nandc->dev;
+ int partitions_count, i, j, ret;
+
+ if (!of_find_property(dn, "qcom,boot-partitions", NULL))
+ return 0;
+
+ partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
+ if (partitions_count <= 0) {
+ dev_err(dev, "Error parsing boot partition\n");
+ return partitions_count ? partitions_count : -EINVAL;
+ }
+
+ host->nr_boot_partitions = partitions_count / 2;
+ host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
+ sizeof(*host->boot_partitions), GFP_KERNEL);
+ if (!host->boot_partitions) {
+ host->nr_boot_partitions = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
+ boot_partition = &host->boot_partitions[i];
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
+ &boot_partition->page_offset);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_offset % mtd->writesize) {
+ dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert offset to nand pages */
+ boot_partition->page_offset /= mtd->writesize;
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
+ &boot_partition->page_size);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition size at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_size % mtd->writesize) {
+ dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert size to nand pages */
+ boot_partition->page_size /= mtd->writesize;
+ }
+
+ return 0;
+}
+
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
struct device_node *dn)
@@ -2972,6 +3169,14 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
nand_cleanup(chip);
+ if (nandc->props->use_codeword_fixup) {
+ ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+ }
+
return ret;
}
@@ -3137,6 +3342,7 @@ static int qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
.is_bam = false,
+ .use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
};
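
The qcom,boot-partitions property consumed by
qcom_nand_host_parse_boot_partitions() above is a flat list of
<offset size> u32 pairs, both in bytes and both required to be multiples
of the page size. A hypothetical chip node (values invented for
illustration):

	nand@0 {
		reg = <0>;
		/* one boot region covering the first 10 MiB */
		qcom,boot-partitions = <0x0 0xa00000>;
	};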
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index ba24cb36d0b9..b2b42dd1a2de 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -52,7 +52,7 @@ static const struct mtd_ooblayout_ops oob_sm_ops = {
.free = oob_sm_ooblayout_free,
};
-/* NOTE: This layout is is not compatabable with SmartMedia, */
+/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have a page-dependent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index b36e5260ae27..e12f9f580a15 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -1223,11 +1223,8 @@ static int tegra_nand_remove(struct platform_device *pdev)
struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
struct nand_chip *chip = ctrl->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
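+	/* Warn on failure instead of returning early and skipping nand_cleanup() */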
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(chip);
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 80dabe6ff0f3..b520fe634041 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs := core.o ato.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
new file mode 100644
index 000000000000..82b377c06812
--- /dev/null
+++ b/drivers/mtd/nand/spi/ato.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Aidan MacDonald
+ *
+ * Author: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+
+#define SPINAND_MFR_ATO 0x9b
+
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+
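+/* The 64-byte OOB is viewed as four 16-byte sections: the upper 8 bytes of
+ * each hold ECC, the rest are free, except byte 0 which stores the BBM.
+ */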
+static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+ return 0;
+}
+
+static int ato25d1ga_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = (16 * section);
+ region->length = 8;
+ } else {
+ /* first byte of section 0 is reserved for the BBM */
+ region->offset = 1;
+ region->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ato25d1ga_ooblayout = {
+ .ecc = ato25d1ga_ooblayout_ecc,
+ .free = ato25d1ga_ooblayout_free,
+};
+
+
+static const struct spinand_info ato_spinand_table[] = {
+ SPINAND_INFO("ATO25D1GA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&ato25d1ga_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops ato_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer ato_spinand_manufacturer = {
+ .id = SPINAND_MFR_ATO,
+ .name = "ATO",
+ .chips = ato_spinand_table,
+ .nchips = ARRAY_SIZE(ato_spinand_table),
+ .ops = &ato_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index d5b685d1605e..9d73910a7ae8 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -927,6 +927,7 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &ato_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 23763d16e4f9..b43df73927a0 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -186,3 +186,12 @@ config MTD_QCOMSMEM_PARTS
help
This provides support for parsing partitions from Shared Memory (SMEM)
for NAND and SPI flash on Qualcomm platforms.
+
+config MTD_SERCOMM_PARTS
+ tristate "Sercomm partition table parser"
+ depends on MTD && RALINK
+ help
+	  This provides a partition table parser for devices with a Sercomm
+	  partition map. This partition table contains real partition
+ offsets, which may differ from device to device depending on the
+ number and location of bad blocks on NAND.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 2e98aa048278..2fcf0ab9e7da 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -10,6 +10,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o
obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SERCOMM_PARTS) += scpart.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_QCOMSMEM_PARTS) += qcomsmempart.o
diff --git a/drivers/mtd/parsers/ofpart_bcm4908.c b/drivers/mtd/parsers/ofpart_bcm4908.c
index 0eddef4c198e..bb072a0940e4 100644
--- a/drivers/mtd/parsers/ofpart_bcm4908.c
+++ b/drivers/mtd/parsers/ofpart_bcm4908.c
@@ -35,12 +35,15 @@ static long long bcm4908_partitions_fw_offset(void)
err = kstrtoul(s + len + 1, 0, &offset);
if (err) {
pr_err("failed to parse %s\n", s + len + 1);
+ of_node_put(root);
return err;
}
+ of_node_put(root);
return offset << 10;
}
+ of_node_put(root);
return -ENOENT;
}
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index feb44a573d44..a16b42a88581 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -58,6 +58,7 @@ static void parse_redboot_of(struct mtd_info *master)
return;
ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ of_node_put(npart);
if (ret)
return;
diff --git a/drivers/mtd/parsers/scpart.c b/drivers/mtd/parsers/scpart.c
new file mode 100644
index 000000000000..02601bb33de4
--- /dev/null
+++ b/drivers/mtd/parsers/scpart.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * drivers/mtd/parsers/scpart.c: Sercomm Partition Parser
+ *
+ * Copyright (C) 2018 NOGUCHI Hiroshi
+ * Copyright (C) 2022 Mikhail Zhilkin
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+
+#define MOD_NAME "scpart"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) MOD_NAME ": " fmt
+
+#define ID_ALREADY_FOUND 0xffffffffUL
+
+#define MAP_OFFS_IN_BLK 0x800
+#define MAP_MIRROR_NUM 2
+
+static const char sc_part_magic[] = {
+ 'S', 'C', 'F', 'L', 'M', 'A', 'P', 'O', 'K', '\0',
+};
+#define PART_MAGIC_LEN sizeof(sc_part_magic)
+
+/* assumes that all fields are stored in CPU-native endianness */
+struct sc_part_desc {
+ uint32_t part_id;
+ uint32_t part_offs;
+ uint32_t part_bytes;
+};
+
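+/* A descriptor with any field still at 0xffffffff (erased flash) ends the table */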
+static uint32_t scpart_desc_is_valid(struct sc_part_desc *pdesc)
+{
+ return ((pdesc->part_id != 0xffffffffUL) &&
+ (pdesc->part_offs != 0xffffffffUL) &&
+ (pdesc->part_bytes != 0xffffffffUL));
+}
+
+static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
+ struct sc_part_desc **ppdesc)
+{
+ int cnt = 0;
+ int res = 0;
+ int res2;
+ loff_t offs;
+ size_t retlen;
+ struct sc_part_desc *pdesc = NULL;
+ struct sc_part_desc *tmpdesc;
+ uint8_t *buf;
+
+ buf = kzalloc(master->erasesize, GFP_KERNEL);
+ if (!buf) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res2 = mtd_read(master, partmap_offs, master->erasesize, &retlen, buf);
+ if (res2 || retlen != master->erasesize) {
+ res = -EIO;
+ goto free;
+ }
+
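+	/* Descriptors start MAP_OFFS_IN_BLK bytes into the block; count the valid ones */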
+ for (offs = MAP_OFFS_IN_BLK;
+ offs < master->erasesize - sizeof(*tmpdesc);
+ offs += sizeof(*tmpdesc)) {
+ tmpdesc = (struct sc_part_desc *)&buf[offs];
+ if (!scpart_desc_is_valid(tmpdesc))
+ break;
+ cnt++;
+ }
+
+ if (cnt > 0) {
+ int bytes = cnt * sizeof(*pdesc);
+
+ pdesc = kcalloc(cnt, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc) {
+ res = -ENOMEM;
+ goto free;
+ }
+ memcpy(pdesc, &(buf[MAP_OFFS_IN_BLK]), bytes);
+
+ *ppdesc = pdesc;
+ res = cnt;
+ }
+
+free:
+ kfree(buf);
+
+out:
+ return res;
+}
+
+static int scpart_find_partmap(struct mtd_info *master,
+ struct sc_part_desc **ppdesc)
+{
+ int magic_found = 0;
+ int res = 0;
+ int res2;
+ loff_t offs = 0;
+ size_t retlen;
+ uint8_t rdbuf[PART_MAGIC_LEN];
+
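+	/* Scan consecutive good blocks for up to MAP_MIRROR_NUM copies of the map */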
+ while ((magic_found < MAP_MIRROR_NUM) &&
+ (offs < master->size) &&
+ !mtd_block_isbad(master, offs)) {
+ res2 = mtd_read(master, offs, PART_MAGIC_LEN, &retlen, rdbuf);
+ if (res2 || retlen != PART_MAGIC_LEN) {
+ res = -EIO;
+ goto out;
+ }
+ if (!memcmp(rdbuf, sc_part_magic, PART_MAGIC_LEN)) {
+ pr_debug("Signature found at 0x%llx\n", offs);
+ magic_found++;
+ res = scpart_scan_partmap(master, offs, ppdesc);
+ if (res > 0)
+ goto out;
+ }
+ offs += master->erasesize;
+ }
+
+out:
+ if (res > 0)
+ pr_info("Valid 'SC PART MAP' (%d partitions) found at 0x%llx\n", res, offs);
+ else
+ pr_info("No valid 'SC PART MAP' was found\n");
+
+ return res;
+}
+
+static int scpart_parse(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ const char *partname;
+ int n;
+ int nr_scparts;
+ int nr_parts = 0;
+ int res = 0;
+ struct sc_part_desc *scpart_map = NULL;
+ struct mtd_partition *parts = NULL;
+ struct device_node *mtd_node;
+ struct device_node *ofpart_node;
+ struct device_node *pp;
+
+ mtd_node = mtd_get_of_node(master);
+ if (!mtd_node) {
+ res = -ENOENT;
+ goto out;
+ }
+
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ pr_info("%s: 'partitions' subnode not found on %pOF.\n",
+ master->name, mtd_node);
+ res = -ENOENT;
+ goto out;
+ }
+
+ nr_scparts = scpart_find_partmap(master, &scpart_map);
+ if (nr_scparts <= 0) {
+		pr_info("No partitions were found in 'SC PART MAP'.\n");
+ res = -ENOENT;
+ goto free;
+ }
+
+ parts = kcalloc(of_get_child_count(ofpart_node), sizeof(*parts),
+ GFP_KERNEL);
+ if (!parts) {
+ res = -ENOMEM;
+ goto free;
+ }
+
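+	/* Pair each OF partition node with an on-flash descriptor via sercomm,scpart-id */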
+ for_each_child_of_node(ofpart_node, pp) {
+ u32 scpart_id;
+
+ if (of_property_read_u32(pp, "sercomm,scpart-id", &scpart_id))
+ continue;
+
+ for (n = 0 ; n < nr_scparts ; n++)
+ if ((scpart_map[n].part_id != ID_ALREADY_FOUND) &&
+ (scpart_id == scpart_map[n].part_id))
+ break;
+ if (n >= nr_scparts)
+			/* no match */
+ continue;
+
+ /* add the partition found in OF into MTD partition array */
+ parts[nr_parts].offset = scpart_map[n].part_offs;
+ parts[nr_parts].size = scpart_map[n].part_bytes;
+ parts[nr_parts].of_node = pp;
+
+ if (!of_property_read_string(pp, "label", &partname))
+ parts[nr_parts].name = partname;
+ if (of_property_read_bool(pp, "read-only"))
+ parts[nr_parts].mask_flags |= MTD_WRITEABLE;
+ if (of_property_read_bool(pp, "lock"))
+ parts[nr_parts].mask_flags |= MTD_POWERUP_LOCK;
+
+ /* mark as 'done' */
+ scpart_map[n].part_id = ID_ALREADY_FOUND;
+
+ nr_parts++;
+ }
+
+ if (nr_parts > 0) {
+ *pparts = parts;
+ res = nr_parts;
+ } else
+		pr_info("No OF partition matches any partition ID in 'SC PART MAP'.\n");
+
+ of_node_put(pp);
+
+free:
+ of_node_put(ofpart_node);
+ kfree(scpart_map);
+ if (res <= 0)
+ kfree(parts);
+
+out:
+ return res;
+}
+
+static const struct of_device_id scpart_parser_of_match_table[] = {
+ { .compatible = "sercomm,sc-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, scpart_parser_of_match_table);
+
+static struct mtd_part_parser scpart_parser = {
+ .parse_fn = scpart_parse,
+ .name = "scpart",
+ .of_match_table = scpart_parser_of_match_table,
+};
+module_mtd_part_parser(scpart_parser);
+
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("scpart");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NOGUCHI Hiroshi <drvlabo@gmail.com>");
+MODULE_AUTHOR("Mikhail Zhilkin <csharper2005@gmail.com>");
+MODULE_DESCRIPTION("Sercomm partition parser");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 0cff2cda1b5a..7f955fade838 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1111,9 +1111,9 @@ static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
- mutex_lock(&ftl->mutex);
del_timer_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
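+	/* The flush work takes this mutex itself, so lock only after it is cancelled */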
+ mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
}
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 94a969185ceb..5070d72835ec 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -237,7 +237,7 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
reg = readl(host->regbase + FMC_CFG);
reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
reg |= FMC_CFG_OP_MODE_NORMAL;
- reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES
: SPI_NOR_ADDR_MODE_3BYTES;
writel(reg, host->regbase + FMC_CFG);
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9032b9ab2eaf..ab3990e6ac25 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -203,7 +203,7 @@ static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
SPIFI_CMD_DATALEN(len) |
SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->program_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
for (i = 0; i < len; i++)
@@ -230,7 +230,7 @@ static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->erase_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
return nxp_spifi_wait_for_cmd(spifi);
@@ -252,12 +252,12 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
}
/* Memory mode supports address length between 1 and 4 */
- if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4)
return -EINVAL;
spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
return 0;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 502967c76c5f..f2c64006f8d7 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -38,7 +38,7 @@
*/
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-#define SPI_NOR_MAX_ADDR_WIDTH 4
+#define SPI_NOR_MAX_ADDR_NBYTES 4
#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
@@ -177,7 +177,7 @@ int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
- if (spi_nor_protocol_is_dtr(nor->write_proto))
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
return nor->controller_ops->erase(nor, offs);
@@ -198,7 +198,7 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(len, buf, 0));
bool usebouncebuf;
@@ -262,7 +262,7 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 0));
ssize_t nbytes;
@@ -972,7 +972,7 @@ static int spi_nor_erase_chip(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
@@ -1113,9 +1113,9 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
- nor->addr_width, addr);
+ nor->addr_nbytes, addr);
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
return spi_mem_exec_op(nor->spimem, &op);
} else if (nor->controller_ops->erase) {
@@ -1126,13 +1126,13 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
* Default implementation, if driver doesn't have a specialized HW
* control
*/
- for (i = nor->addr_width - 1; i >= 0; i--) {
+ for (i = nor->addr_nbytes - 1; i >= 0; i--) {
nor->bouncebuf[i] = addr & 0xff;
addr >>= 8;
}
return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
+ nor->bouncebuf, nor->addr_nbytes);
}
/**
@@ -2249,43 +2249,43 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return 0;
}
-static int spi_nor_set_addr_width(struct spi_nor *nor)
+static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
- if (nor->addr_width) {
- /* already configured from SFDP */
+ if (nor->params->addr_nbytes) {
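+		/* configured by SFDP tables or a manufacturer fixup */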
+ nor->addr_nbytes = nor->params->addr_nbytes;
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
/*
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd address width cannot be used because
+ * in this protocol an odd addr_nbytes cannot be used because
* then the address phase would only span a cycle and a half.
* Half a cycle would be left over. We would then have to start
* the dummy phase in the middle of a cycle and so too the data
* phase, and we will end the transaction with half a cycle left
* over.
*
- * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
* avoid this situation.
*/
- nor->addr_width = 4;
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
+ nor->addr_nbytes = 4;
+ } else if (nor->info->addr_nbytes) {
+ nor->addr_nbytes = nor->info->addr_nbytes;
} else {
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
}
- if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
+ if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+ nor->addr_nbytes = 4;
}
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
+ if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
+ dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
+ nor->addr_nbytes);
return -EINVAL;
}
/* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
!(nor->flags & SNOR_F_HAS_4BAIT))
spi_nor_set_4byte_opcodes(nor);
@@ -2304,7 +2304,7 @@ static int spi_nor_setup(struct spi_nor *nor,
if (ret)
return ret;
- return spi_nor_set_addr_width(nor);
+ return spi_nor_set_addr_nbytes(nor);
}
/**
@@ -2382,12 +2382,7 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
*/
erase_mask = 0;
i = 0;
- if (no_sfdp_flags & SECT_4K_PMC) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K_PMC);
- i++;
- } else if (no_sfdp_flags & SECT_4K) {
+ if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
@@ -2497,7 +2492,6 @@ static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
if (spi_nor_parse_sfdp(nor)) {
memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
- nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
}
}
@@ -2718,7 +2712,7 @@ static int spi_nor_init(struct spi_nor *nor)
nor->flags & SNOR_F_SWP_IS_VOLATILE))
spi_nor_try_unlock_all(nor);
- if (nor->addr_width == 4 &&
+ if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
!(nor->flags & SNOR_F_4B_OPCODES)) {
/*
@@ -2730,7 +2724,7 @@ static int spi_nor_init(struct spi_nor *nor)
*/
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
"enabling reset hack; may not recover from unexpected reboots\n");
- nor->params->set_4byte_addr_mode(nor, true);
+ return nor->params->set_4byte_addr_mode(nor, true);
}
return 0;
@@ -2845,7 +2839,7 @@ static void spi_nor_put_device(struct mtd_info *mtd)
void spi_nor_restore(struct spi_nor *nor)
{
/* restore the addressing mode */
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET)
nor->params->set_4byte_addr_mode(nor, false);
@@ -2989,7 +2983,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
- * - set the address width.
+ * - set the number of address bytes.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
@@ -3030,7 +3024,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
@@ -3061,7 +3055,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 3f841ec36e56..85b0cf254e97 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -84,9 +84,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_width, addr) \
+#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_nbytes, addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
- SPI_MEM_OP_ADDR(addr_width, addr, 0), \
+ SPI_MEM_OP_ADDR(addr_nbytes, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -340,6 +340,11 @@ struct spi_nor_otp {
* @writesize Minimal writable flash unit size. Defaults to 1. Set to
* ECC unit size for ECC-ed flashes.
* @page_size: the page size of the SPI NOR flash memory.
+ * @addr_nbytes: number of address bytes to send.
+ * @addr_mode_nbytes: number of address bytes of current address mode. Useful
+ * when the flash operates with 4B opcodes but needs the
+ * internal address mode for opcodes that don't have a 4B
+ * opcode correspondent.
* @rdsr_dummy: dummy cycles needed for Read Status Register command
* in octal DTR mode.
* @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
@@ -372,6 +377,8 @@ struct spi_nor_flash_parameter {
u64 size;
u32 writesize;
u32 page_size;
+ u8 addr_nbytes;
+ u8 addr_mode_nbytes;
u8 rdsr_dummy;
u8 rdsr_addr_nbytes;
@@ -429,7 +436,7 @@ struct spi_nor_fixups {
* isn't necessarily called a "sector" by the vendor.
* @n_sectors: the number of sectors.
* @page_size: the flash's page size.
- * @addr_width: the flash's address width.
+ * @addr_nbytes: number of address bytes to send.
*
* @parse_sfdp: true when flash supports SFDP tables. The false value has no
* meaning. If one wants to skip the SFDP tables, one should
@@ -457,7 +464,6 @@ struct spi_nor_fixups {
* flags are used together with the SPI_NOR_SKIP_SFDP flag.
* SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
* SECT_4K: SPINOR_OP_BE_4K works uniformly.
- * SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly.
* SPI_NOR_DUAL_READ: flash supports Dual Read.
* SPI_NOR_QUAD_READ: flash supports Quad Read.
* SPI_NOR_OCTAL_READ: flash supports Octal Read.
@@ -488,7 +494,7 @@ struct flash_info {
unsigned sector_size;
u16 n_sectors;
u16 page_size;
- u16 addr_width;
+ u8 addr_nbytes;
bool parse_sfdp;
u16 flags;
@@ -505,7 +511,6 @@ struct flash_info {
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
#define SECT_4K BIT(1)
-#define SECT_4K_PMC BIT(2)
#define SPI_NOR_DUAL_READ BIT(3)
#define SPI_NOR_QUAD_READ BIT(4)
#define SPI_NOR_OCTAL_READ BIT(5)
@@ -550,11 +555,11 @@ struct flash_info {
.n_sectors = (_n_sectors), \
.page_size = 256, \
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = (_addr_width), \
+ .addr_nbytes = (_addr_nbytes), \
.flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
#define OTP_INFO(_len, _n_regions, _base, _offset) \
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index eaf84f7a0676..df76cb5de3f9 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -86,7 +86,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
seq_printf(s, "size\t\t%s\n", buf);
seq_printf(s, "write size\t%u\n", params->writesize);
seq_printf(s, "page size\t%u\n", params->page_size);
- seq_printf(s, "address width\t%u\n", nor->addr_width);
+ seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes);
seq_puts(s, "flags\t\t");
spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names));
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index 79e2408f4998..fcc3b0e7cda9 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -13,7 +13,7 @@ static const struct flash_info esmt_nor_parts[] = {
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64)
+ { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index c012bc2486e1..89a66a19d754 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -14,13 +14,13 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_bfpt *bfpt)
{
/*
- * IS25LP256 supports 4B opcodes, but the BFPT advertises a
- * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
- * Overwrite the address width advertised by the BFPT.
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY.
+ * Overwrite the number of address bytes advertised by the BFPT.
*/
if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
- nor->addr_width = 4;
+ nor->params->addr_nbytes = 4;
return 0;
}
@@ -29,6 +29,21 @@ static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
+static void pm25lv_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ int i;
+
+ /* The PM25LV series has a different 4k sector erase opcode */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (map->erase_type[i].size == 4096)
+ map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+}
+
+static const struct spi_nor_fixups pm25lv_nor_fixups = {
+ .late_init = pm25lv_nor_late_init,
+};
+
static const struct flash_info issi_nor_parts[] = {
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
@@ -62,9 +77,13 @@ static const struct flash_info issi_nor_parts[] = {
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index a96f74e0f568..3c9681a3f7a3 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -399,8 +399,16 @@ static int micron_st_nor_ready(struct spi_nor *nor)
return sr_ready;
ret = micron_st_nor_read_fsr(nor, nor->bouncebuf);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * Some controllers, such as Intel SPI, do not support low
+ * level operations such as reading the flag status
+ * register. They only expose small amount of high level
+ * operations to the software. If this is the case we use
+ * only the status register value.
+ */
+ return ret == -EOPNOTSUPP ? sr_ready : ret;
+ }
if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
if (nor->bouncebuf[0] & FSR_E_ERR)
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index fa63d8571218..00ab0d2d6d2f 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -35,13 +35,13 @@
*/
int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
struct spi_mem_dirmap_desc *rdesc;
enum spi_nor_protocol read_proto;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_proto = nor->read_proto;
rdesc = nor->dirmap.rdesc;
@@ -54,7 +54,7 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
ret = spi_nor_read_data(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_proto = read_proto;
nor->dirmap.rdesc = rdesc;
@@ -85,11 +85,11 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
{
enum spi_nor_protocol write_proto;
struct spi_mem_dirmap_desc *wdesc;
- u8 addr_width, program_opcode;
+ u8 addr_nbytes, program_opcode;
int ret, written;
program_opcode = nor->program_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
write_proto = nor->write_proto;
wdesc = nor->dirmap.wdesc;
@@ -113,7 +113,7 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
out:
nor->program_opcode = program_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->write_proto = write_proto;
nor->dirmap.wdesc = wdesc;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index a5211543d30d..2257f1b4c2e2 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -134,7 +134,7 @@ struct sfdp_4bait {
/**
* spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
- * addr_width and read_dummy members of the struct spi_nor
+ * addr_nbytes and read_dummy members of the struct spi_nor
* should be previously
* set.
* @nor: pointer to a 'struct spi_nor'
@@ -178,21 +178,21 @@ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
size_t len, void *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
nor->read_opcode = SPINOR_OP_RDSFDP;
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
nor->read_dummy = 8;
ret = spi_nor_read_raw(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
return ret;
@@ -462,11 +462,13 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
- nor->addr_width = 3;
+ params->addr_nbytes = 3;
+ params->addr_mode_nbytes = 3;
break;
case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
+ params->addr_mode_nbytes = 4;
break;
default:
@@ -637,12 +639,12 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
/**
- * spi_nor_smpt_addr_width() - return the address width used in the
+ * spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the
* configuration detection command.
* @nor: pointer to a 'struct spi_nor'
* @settings: configuration detection command descriptor, dword1
*/
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings)
{
switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
case SMPT_CMD_ADDRESS_LEN_0:
@@ -653,7 +655,7 @@ static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
return 4;
case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
default:
- return nor->addr_width;
+ return nor->params->addr_mode_nbytes;
}
}
@@ -690,7 +692,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
u32 addr;
int err;
u8 i;
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
u8 read_data_mask, map_id;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
@@ -698,7 +700,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
if (!buf)
return ERR_PTR(-ENOMEM);
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode;
@@ -709,7 +711,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
break;
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
- nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1];
@@ -756,7 +758,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
/* fall through */
out:
kfree(buf);
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode;
return ret;
@@ -1044,7 +1046,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
/*
* We need at least one 4-byte op code per read, program and erase
* operation; the .read(), .write() and .erase() hooks share the
- * nor->addr_width value.
+ * nor->addr_nbytes value.
*/
if (!read_hwcaps || !pp_hwcaps || !erase_mask)
goto out;
@@ -1098,7 +1100,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
* Spansion memory. However this quirk is no longer needed with new
* SFDP compliant memories.
*/
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
/* fall through */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 43cd6cd92537..0150049007be 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -14,6 +14,8 @@
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
+#define SPINOR_REG_CYPRESS_CFR1V 0x00800002
+#define SPINOR_REG_CYPRESS_CFR1V_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2V 0x00800003
#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR3V 0x00800004
@@ -114,6 +116,150 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
}
/**
+ * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
+ * register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * It is recommended to update volatile registers in the field application due
+ * to the risk of non-volatile register corruption by a power interrupt. This
+ * function sets Quad Enable bit in CFR1 volatile. If users set the Quad Enable
+ * bit in the CFR1 non-volatile in advance (typically by a Flash programmer
+ * before mounting Flash on PCB), the Quad Enable bit in the CFR1 volatile is
+ * also set during Flash power-up.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ u8 cfr1v_written;
+ int ret;
+
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1V_QUAD_EN)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1V_QUAD_EN;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V, 1,
+ nor->bouncebuf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ cfr1v_written = nor->bouncebuf[0];
+
+ /* Read back and check it. */
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != cfr1v_written) {
+ dev_err(nor->dev, "CFR1: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * cypress_nor_set_page_size() - Set page size which corresponds to the flash
+ * configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on part but the
+ * page size is actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_set_page_size(struct spi_nor *nor)
+{
+ struct spi_mem_op op =
+ CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
+ nor->bouncebuf);
+ int ret;
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
+ nor->params->page_size = 512;
+ else
+ nor->params->page_size = 256;
+
+ return 0;
+}
+
+static int
+s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /* Replace Quad Enable with volatile version */
+ nor->params->quad_enable = cypress_nor_quad_enable_volatile;
+
+ return cypress_nor_set_page_size(nor);
+}
+
+static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_erase_type *erase_type =
+ nor->params->erase_map.erase_type;
+ unsigned int i;
+
+ /*
+ * In some parts, 3byte erase opcodes are advertised by 4BAIT.
+ * Convert them to 4byte erase opcodes.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ switch (erase_type[i].opcode) {
+ case SPINOR_OP_SE:
+ erase_type[i].opcode = SPINOR_OP_SE_4B;
+ break;
+ case SPINOR_OP_BE_4K:
+ erase_type[i].opcode = SPINOR_OP_BE_4K_4B;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void s25hx_t_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /* Fast Read 4B requires mode cycles */
+ params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+
+ /* The writesize should be ECC data unit size */
+ params->writesize = 16;
+}
+
+static struct spi_nor_fixups s25hx_t_fixups = {
+ .post_bfpt = s25hx_t_post_bfpt_fixup,
+ .post_sfdp = s25hx_t_post_sfdp_fixup,
+ .late_init = s25hx_t_late_init,
+};
+
+/**
* cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
@@ -167,28 +313,7 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- /*
- * The BFPT table advertises a 512B page size but the page size is
- * actually configurable (with the default being 256B). Read from
- * CFR3V[4] and set the correct size.
- */
- struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
- nor->bouncebuf);
- int ret;
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
- nor->params->page_size = 512;
- else
- nor->params->page_size = 256;
-
- return 0;
+ return cypress_nor_set_page_size(nor);
}
static const struct spi_nor_fixups s28hs512t_fixups = {
@@ -310,6 +435,22 @@ static const struct flash_info spansion_nor_parts[] = {
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
{ "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 1d2f5db047bd..5723157739fc 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -31,7 +31,7 @@
.sector_size = (8 * (_page_size)), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = 3, \
+ .addr_nbytes = 3, \
.flags = SPI_NOR_NO_FR
/* Xilinx S3AN share MFR with Atmel SPI NOR */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d7fb33c078e8..184608bd8999 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -84,7 +84,8 @@ enum ad_link_speed_type {
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
-static u16 ad_ticks_per_sec;
+
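+/* The AD state machine runs once per AD_TIMER_INTERVAL ms, making the tick rate a compile-time constant */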
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
@@ -2001,36 +2002,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 007d43e46dcb..b9dbad3a8af8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -653,6 +653,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
+ struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
@@ -665,6 +666,15 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
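+
+	/* If the source IP belongs to a bridge master, the ARP was not generated
+	 * by the bond itself; don't assign it an rlb tx channel.
+	 */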
+ dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+ if (dev) {
+ if (netif_is_bridge_master(dev)) {
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ }
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e75acb14d066..2f4da2c13c0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2001,6 +2001,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
+ new_slave->last_tx = new_slave->last_rx;
+
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -2079,7 +2081,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
@@ -2884,8 +2886,11 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
return;
}
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
arp_xmit(skb);
+ }
+
return;
}
@@ -3074,8 +3079,7 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
@@ -3103,8 +3107,10 @@ static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
}
addrconf_addr_solict_mult(daddr, &mcaddr);
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
ndisc_send_skb(skb, &mcaddr, saddr);
+ }
}
static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
@@ -3246,8 +3252,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_ns(bond, slave, saddr, daddr);
else if (curr_arp_slave &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_ns(bond, slave, saddr, daddr);
out:
@@ -3335,12 +3340,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
- unsigned long trans_start = dev_trans_start(slave->dev);
+ unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
- if (bond_time_in_interval(bond, trans_start, 1) &&
+ if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
@@ -3365,7 +3370,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
@@ -3431,7 +3436,7 @@ re_arm:
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
- unsigned long trans_start, last_rx;
+ unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
@@ -3482,9 +3487,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
@@ -3501,8 +3506,8 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
- unsigned long trans_start;
struct list_head *iter;
+ unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
@@ -3511,10 +3516,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
- bond_time_in_interval(bond, trans_start, 1))) {
+ bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
@@ -5333,8 +5338,14 @@ static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
- if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
- return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+ /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+ * was true, if tls_device_down is running in parallel, but it's OK,
+ * because bond_get_slave_by_dev has a NULL check.
+ */
+ if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 65ba6697bd7d..c469b2f3e57d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -63,7 +63,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
else
*mscan_clksrc = MSCAN_CLKSRC_XTAL;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
if (!freq)
return 0;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index e750d13c8841..c320de474f40 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1070,9 +1070,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1082,6 +1079,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
@@ -1092,6 +1101,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index d1e1a459c045..d31191686a54 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -195,7 +195,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 4b14d80d27ed..e4f446db0ca1 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -613,6 +613,9 @@ int ksz9477_fdb_dump(struct ksz_device *dev, int port,
goto exit;
}
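+	/* Skip entries whose valid bit is clear rather than reporting stale data */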
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index ed7d137cba99..6bd69a7e6809 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -803,9 +803,15 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->info->supports_rgmii[port])
phy_interface_set_rgmii(config->supported_interfaces);
- if (dev->info->internal_phy[port])
+ if (dev->info->internal_phy[port]) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
@@ -962,6 +968,7 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
const u16 *regs;
int ret;
@@ -1001,6 +1008,14 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ /* Start with learning disabled on standalone user ports, and enabled
+ * on the CPU port. In lack of other finer mechanisms, learning on the
+ * CPU port will avoid flooding bridge local addresses on the network
+ * in some cases.
+ */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
/* start switch */
regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
SW_START, SW_START);
@@ -1277,6 +1292,8 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ p = &dev->ports[port];
+
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
@@ -1286,9 +1303,13 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
@@ -1300,12 +1321,38 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
- p = &dev->ports[port];
p->stp_state = state;
ksz_update_port_member(dev, port);
}
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
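+	/* Only the BR_LEARNING flag can be offloaded */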
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -1719,6 +1766,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
.port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz_port_vlan_filtering,
.port_vlan_add = ksz_port_vlan_add,
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 764ada3a0f42..0d9520dc6d2d 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -65,6 +65,7 @@ struct ksz_chip_data {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
int stp_state;
struct phy_device phydev;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..83dca9179aa0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 859196898a7d..aadb0bd7c24f 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -610,6 +610,9 @@ static int felix_change_tag_protocol(struct dsa_switch *ds,
old_proto_ops = felix->tag_proto_ops;
+ if (proto_ops == old_proto_ops)
+ return 0;
+
err = proto_ops->setup(ds);
if (err)
goto setup_failed;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 61ed317602e7..1cdce8a98d1d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -274,27 +274,98 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
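Editor's note: each REG() entry above is a byte offset into the per-SoC SYS counter block, with consecutive 32-bit counters packed at a 4-byte stride; the new entries simply fill in the previously unmapped counters. Schematically (ocelot-style lookup, shown as comments):

    /* Sketch: the enum indexes a u32 offset table, so one counter read is
     * "SYS target base + regmap[reg]", e.g.:
     *   SYS_COUNT_RX_OCTETS  -> 0x000000
     *   SYS_COUNT_RX_UNICAST -> 0x000004
     */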
@@ -547,100 +618,379 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
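Editor's note: the stats layout moves from an open-coded { .offset, .name } list terminated by OCELOT_STAT_END to a fixed-size array indexed by the OCELOT_STAT_* enum, with each entry tied to a SYS_COUNT_* regmap register. That lets the core walk a known-length table instead of scanning for a sentinel. A sketch of a consumer loop under that assumption (ocelot_read() as an illustrative accessor):

    /* Sketch: walk the fixed-size layout; zero-initialized (unnamed)
     * entries are simply skipped.
     */
    for (i = 0; i < OCELOT_NUM_STATS; i++) {
            if (!layout[i].name)
                    continue;
            *data++ = ocelot_read(ocelot, layout[i].reg);
    }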
@@ -1137,6 +1487,7 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
{
struct tc_taprio_sched_entry *entry;
u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
int tc, i, n;
/* Initialize arrays */
@@ -1164,16 +1515,28 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
if (entry->gate_mask & BIT(tc)) {
gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
} else {
/* Gate closes now, record a potential new
* minimum and reinitialize length
*/
- if (min_gate_len[tc] > gate_len[tc])
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
min_gate_len[tc] = gate_len[tc];
gate_len[tc] = 0;
}
}
}
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
}
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
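Editor's note: gates_ever_opened settles an ambiguity: after the loop, a traffic class whose gate never opened still has min_gate_len == U64_MAX, the same value an always-open gate ends with, and the new `&& gate_len[tc]` guard stops a close event from recording a zero "minimum" for a gate that was never open. A worked example of the three cases:

    /* Worked example (illustrative), schedule = {30us, 70us}:
     *   TC0 open in both entries  -> never closes -> min stays U64_MAX (always open)
     *   TC1 open only in entry 0  -> closes at entry 1 -> min_gate_len = 30000 ns
     *   TC2 open in neither entry -> gates_ever_opened bit clear -> forced to 0
     */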
@@ -2153,7 +2516,7 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
struct felix_stream_filter_counters *counters)
{
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
SYS_STAT_CFG_STAT_VIEW_M,
@@ -2170,7 +2533,7 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
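Editor's note: converting stats_lock from a mutex to a spinlock implies the counter window is now also taken from non-sleeping context elsewhere in the ocelot stats code; the critical section here is unchanged, only the primitive differs. The shape, schematically:

    /* Sketch: the view select and the subsequent counter reads must stay
     * atomic with respect to other stats readers, so one non-sleeping
     * lock spans the whole select+read window.
     */
    spin_lock(&ocelot->stats_lock);
    /* ... select which filter/port index the counter RAM exposes ... */
    /* ... read the counters for that view ... */
    spin_unlock(&ocelot->stats_lock);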
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ea0649211356..b34f4cdfe814 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -270,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -543,101 +614,379 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..10c6fea1227f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
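Editor's note: the one-character change fixes a classic unwind bug. `while (i-- >= 0)` tests the old value and decrements before the body, so the loop runs once more than intended and the final iteration indexes regions[-1]. Pre-decrement tests the new value and stops at index 0:

    /* Sketch of the unwind: entries 0..i-1 were created, entry i failed.
     * Pre-decrement means the body sees i-1 down to 0 and never -1
     * (i must be a signed type for this to terminate).
     */
    while (--i >= 0)
            destroy(items[i]);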
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e11cc29d3264..06508eebb585 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -1014,7 +1012,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -1064,11 +1061,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1382,7 +1379,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
@@ -1400,9 +1396,8 @@ int aq_nic_stop(struct aq_nic_s *self)
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
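Editor's note: the aq_vec cursor removal fixes the same bug family from the other end. The old form `for (i = 0, aq_vec = self->aq_vec[0]; self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])` re-reads aq_vec[i] in the increment expression, before the bounds test, so the final pass reads one slot past the populated array. Indexing inside the body keeps the read behind the check:

    /* Sketch: keep the element access behind the bounds test. */
    for (i = 0U; i < self->aq_vecs; i++)
            aq_vec_isr(i, (void *)self->aq_vec[i]);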
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 2dfc1e32bbb3..93580484a3f4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
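Editor's note: swapping the two lines closes a BQL race. Advancing ring->end is what makes the descriptors visible to the TX completion path; if completion ran in the window before netdev_sent_queue(), it could report completed bytes that were never accounted as sent, underflowing the byte queue limits. The required order on the transmit path, schematically:

    /* Sketch of the required TX-side ordering:
     * 1) account the bytes with BQL:   netdev_sent_queue(net_dev, skb->len);
     * 2) publish the descriptors:      ring->end += nr_frags + 1;
     * 3) barrier, then ring doorbell.
     * Completion then calls netdev_completed_queue() for what it reaps.
     */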
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7071604f9984..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13844,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ba0f1ffac507..f46eefb5a029 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11178,10 +11178,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
- features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
-
- if (!(bp->flags & BNXT_FLAG_TPA))
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 075c6206325c..b1b17f911300 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2130,6 +2130,7 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
+ u8 xdp_has_frags;
struct bnxt_ptp_cfg *ptp_cfg;
u8 ptp_all_rx_tstamp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 14df8cfc2946..a36803e79e92 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,7 +21,6 @@
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
#include "bnxt_nvm_defs.h"
-#include "bnxt_ethtool.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -1307,6 +1306,7 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
+ devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 730febd19330..a4cba7cb2783 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f53387ed0167..c3065ec0a479 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -181,6 +181,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp)
{
struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 offset;
@@ -192,7 +193,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
- xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+ if (bp->xdp_has_frags)
+ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
}
@@ -397,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog)
+ if (prog) {
tx_xdp = bp->rx_nr_rings;
+ bp->xdp_has_frags = prog->aux->xdp_has_frags;
+ }
tc = netdev_get_num_tc(dev);
if (!tc)
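Editor's note: together with the bnxt_fix_features hunk (LRO/HW-GRO dropped whenever an XDP program is attached), these changes wire up multi-buffer XDP: the driver samples prog->aux->xdp_has_frags at attach time and sizes the xdp_buff frame accordingly in bnxt_xdp_buff_init(). On the program side, frags support is declared at load time; a minimal sketch using the libbpf section-name convention (which sets BPF_F_XDP_HAS_FRAGS):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Sketch: loading from "xdp.frags" marks the program as frags-aware,
     * which surfaces in the kernel as prog->aux->xdp_has_frags.
     */
    SEC("xdp.frags")
    int pass_all(struct xdp_md *ctx)
    {
            return XDP_PASS;
    }

    char LICENSE[] SEC("license") = "GPL";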
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c888ddee1fc4..7ded559842e8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -393,6 +393,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_MAC_INTERRUPT;
+ /* Indicate that the MAC is responsible for PHY PM */
+ dev->phydev->mac_managed_pm = true;
+
return 0;
}
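Editor's note: mac_managed_pm tells phylib that the MAC driver owns the PHY's suspend/resume ordering, so mdio_bus_phy_suspend()/mdio_bus_phy_resume() leave it alone; without it the PHY can be resumed before the still-suspended MAC. It is a one-time claim made after connecting the PHY:

    /* Sketch: claim PHY PM ownership right after phy_connect(). */
    phydev->mac_managed_pm = true;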
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 26433a62d7f0..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd[3];
+ __be32 rsvd;
};
struct cpl_act_open_req {
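Editor's note: shrinking rsvd[3] to a single word corrects the on-wire size of cpl_t5_pass_accept_rpl; CPL messages must match the length the firmware expects. A compile-time guard makes such drift visible early (illustrative only, not part of the patch, and the length macro below is a placeholder assumption):

    /* Illustrative guard: catch CPL size drift at build time.
     * CPL_T5_PASS_ACCEPT_RPL_LEN is a hypothetical constant for the
     * firmware-defined message length.
     */
    static_assert(sizeof(struct cpl_t5_pass_accept_rpl) ==
                  CPL_T5_PASS_ACCEPT_RPL_LEN,
                  "reply must match the firmware message length");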
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index bfee0e4e54b1..da9973b711f4 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1932,6 +1932,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
struct sge_eth_txq *q;
struct adapter *adap;
@@ -1945,7 +1946,12 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != dev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't quit on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
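Editor's note: tls_ctx->netdev is now an RCU-managed pointer, so the TX path reads it with rcu_dereference_bh() and tolerates NULL, which lets it race safely with tls_device_down() instead of dereferencing a cleared pointer. The pattern:

    /* Sketch: RCU read of a netdev pointer that may be torn down in
     * parallel. The _bh variant is valid because ndo_start_xmit runs
     * with bottom halves disabled.
     */
    struct net_device *nd = rcu_dereference_bh(tls_ctx->netdev);

    if (nd && nd != dev)
            goto out;       /* offloaded to a different device */
    /* nd == NULL: tls_device_down in progress; keep processing */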
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index cb069a0af7b9..a5f7152a1716 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -340,14 +340,14 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
return 0;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
int i;
for (i = 0; i < count; i++) {
- entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+ entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
if (entry->len) {
if (i == 0)
@@ -395,7 +395,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
retval = tsnep_tx_map(skb, tx, count);
if (retval != 0) {
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -464,7 +464,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
@@ -1282,7 +1282,7 @@ MODULE_DEVICE_TABLE(of, tsnep_of_match);
static struct platform_driver tsnep_driver = {
.driver = {
.name = TSNEP,
- .of_match_table = of_match_ptr(tsnep_of_match),
+ .of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
.remove = tsnep_remove,
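Editor's note: tsnep_tx_unmap() previously always started unmapping at tx->read, which is only correct on the completion path; on a failed transmit, the descriptors that need undoing start at tx->write, where the mapping just happened. The extra parameter makes the window explicit:

    /* Sketch: entries live at (index + i) % TSNEP_RING_SIZE, so the
     * caller names where the unmap window starts.
     */
    tsnep_tx_unmap(tx, tx->write, count); /* xmit error: undo fresh mappings */
    tsnep_tx_unmap(tx, tx->read, count);  /* completion: release reaped entries */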
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 45634579adb6..a770bab4d1ed 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2886,6 +2886,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
/* The Aquantia PHYs are capable of performing rate adaptation */
#define PHY_VEND_AQUANTIA 0x03a1b400
+#define PHY_VEND_AQUANTIA2 0x31c31c00
static int dpaa_phy_init(struct net_device *net_dev)
{
@@ -2893,6 +2894,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
+ u32 phy_vendor;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -2905,9 +2907,11 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
+ phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
/* Unless the PHY is capable of rate adaptation */
if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ (phy_vendor != PHY_VEND_AQUANTIA &&
+ phy_vendor != PHY_VEND_AQUANTIA2)) {
/* remove any features not supported by the controller */
ethtool_convert_legacy_u32_to_link_mode(mask,
mac_dev->if_support);
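Editor's note: PHY IDs carry the vendor OUI in the upper bits, so masking with GENMASK(31, 10) compares only the vendor portion; caching the masked value lets one check cover both Aquantia OUIs instead of repeating the mask per compare:

    /* Sketch: compare only the vendor (OUI) part of the 32-bit PHY ID. */
    u32 phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
    bool is_aquantia = phy_vendor == PHY_VEND_AQUANTIA ||
                       phy_vendor == PHY_VEND_AQUANTIA2;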
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cd9ec80522e7..75d51572693d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1660,8 +1660,8 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
buf_array[i] = addr;
/* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ed7301b69169..0cebe4b63adb 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -634,6 +634,13 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
+ struct {
+ struct timespec64 ts_phc;
+ u64 ns_sys;
+ u32 at_corr;
+ u8 at_inc_corr;
+ } ptp_saved_state;
+
u64 ethtool_stats[];
};
@@ -644,5 +651,8 @@ void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+void fec_ptp_save_state(struct fec_enet_private *fep);
+int fec_ptp_restore_state(struct fec_enet_private *fep);
+
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e8e2aa1e7f01..b0d60f898249 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -285,8 +285,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -982,6 +985,9 @@ fec_restart(struct net_device *ndev)
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
+ struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+
+ fec_ptp_save_state(fep);
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1135,7 +1141,7 @@ fec_restart(struct net_device *ndev)
}
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1156,6 +1162,14 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
+ /* Restart PPS if needed */
+ if (fep->pps_enable) {
+ /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+ fep->pps_enable = 0;
+ fec_ptp_restore_state(fep);
+ fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+ }
+
/* Enable interrupts we wish to service */
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1206,6 +1220,8 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
+ struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+ u32 ecntl = 0;
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -1215,6 +1231,8 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
+ fec_ptp_save_state(fep);
+
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
@@ -1234,12 +1252,28 @@ fec_stop(struct net_device *ndev)
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ if (fep->bufdesc_ex)
+ ecntl |= FEC_ECR_EN1588;
+
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ ecntl |= FEC_ECR_ETHEREN;
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
+
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
+
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
+ /* Restart PPS if needed */
+ if (fep->pps_enable) {
+ /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+ fep->pps_enable = 0;
+ fec_ptp_restore_state(fep);
+ fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+ }
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 5ddb769bdfb4..a7f4c3c29f3e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -924,7 +924,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(np, "current-speed", &prop_size);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index f85b5e81dfc1..95f778cce98c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -100,8 +100,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7d49c28215f3..c74d04f4b2fd 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -637,7 +633,36 @@ void fec_ptp_stop(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ if (fep->pps_enable)
+ fec_ptp_enable_pps(fep, 0);
+
cancel_delayed_work_sync(&fep->time_keep);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
+
+void fec_ptp_save_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc_corr;
+
+ fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+ fep->ptp_saved_state.ns_sys = ktime_get_ns();
+
+ fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
+ atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
+ fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
+}
+
+int fec_ptp_restore_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ u64 ns_sys;
+
+ writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
+ atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
+ writel(atime_inc, fep->hwp + FEC_ATIME_INC);
+
+ ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
+ timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
+ return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+}
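Going through fep->cc.read() instead of open-coding the FEC_ATIME capture funnels every counter read through the driver's cyclecounter, whose output feeds the standard timecounter conversion. A hedged sketch of that pattern; the raw counter helper is a stand-in, not the driver's exact capture sequence:

    #include <linux/timecounter.h>

    /* Illustrative read callback: a real driver latches and reads a HW counter. */
    static u64 sketch_cc_read(const struct cyclecounter *cc)
    {
            return read_raw_counter();      /* hypothetical helper */
    }

    /* Convert the current raw count to nanoseconds on the PHC timeline. */
    static u64 sketch_now_ns(struct timecounter *tc)
    {
            return timecounter_cyc2time(tc, tc->cc->read(tc->cc));
    }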
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 152f4d83765a..d37d7a19a759 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *);
+ int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
@@ -136,7 +136,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
}
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->dev.of_node);
+ clock = get_bus_freq(&ofdev->dev);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 156e92c43780..e9cd0fa6a0d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -4485,7 +4485,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
(struct in6_addr *)&ipv6_full_mask))
new_mask |= I40E_L3_V6_DST_MASK;
else if (ipv6_addr_any((struct in6_addr *)
- &usr_ip6_spec->ip6src))
+ &usr_ip6_spec->ip6dst))
new_mask &= ~I40E_L3_V6_DST_MASK;
else
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b36bf9c3e1e4..9f1d5de7bf16 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -384,7 +384,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f6ba97a0166e..d4226161a3ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3203,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* set the tx_flags to indicate the IP protocol type. this is
* required so that checksum header computation below is accurate.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
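Both hunks in this file fix the same unwind bug: when register configuration failed after the per-entry DMA buffers had been allocated, jumping to the old label freed the ring but leaked the buffers. The general shape of the fix, sketched with hypothetical helpers:

    /* Sketch only: unwind labels mirror the allocation order in reverse. */
    static int setup_sketch(int nbufs)
    {
            int i, err;

            err = ring_alloc();
            if (err)
                    return err;
            err = bufs_alloc(nbufs);
            if (err)
                    goto free_ring;
            err = regs_config();
            if (err)
                    goto free_bufs;         /* was: goto free_ring -- leaked bufs */
            return 0;

    free_bufs:
            for (i = 0; i < nbufs; i++)
                    buf_free(i);
    free_ring:
            ring_free();
            return err;
    }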
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 45d097a164ad..f39440ad5c50 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2367,7 +2367,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
- goto err_alloc;
+ goto err;
} else if (err == -EINVAL) {
/* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -3086,12 +3086,15 @@ continue_reset:
return;
reset_err:
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
+ }
+ iavf_disable_vf(adapter);
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
- iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
}
/**
@@ -4085,8 +4088,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in __IAVF_INIT_CONFIG_ADAPTER state, the crit_lock
+ * is already taken and iavf_open is called from an upper
+ * device's notifier reacting to a NETDEV_REGISTER event.
+ * We have to bail out here to avoid a deadlock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
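The trylock loop above could spin forever when the lock holder is itself waiting on this caller, so the patch turns that case into -EBUSY. Distilled, with would_deadlock() standing in for the driver's state test:

    /* Sketch: poll for a mutex, but bail out when progress is impossible. */
    while (!mutex_trylock(&adapter_lock)) {         /* lock name illustrative */
            if (would_deadlock())                   /* hypothetical predicate */
                    return -EBUSY;
            usleep_range(500, 1000);                /* back off between tries */
    }
    /* ... critical section ... */
    mutex_unlock(&adapter_lock);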
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cc5b85afd437..841fa149c407 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
}
/**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of the queue to check for XSK buff pool presence
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets the XSK buff pool pointer on the XDP ring.
+ *
+ * The XDP ring is taken from the Rx ring, and the Rx ring is selected by the
+ * given queue id, because a queue vector may have more than one XDP ring
+ * assigned, e.g. when the user reduced the queue count on the netdev; the Rx
+ * ring carries a pointer to one of these XDP rings for its own purposes, such
+ * as handling the XDP_TX action, so we can piggyback on the
+ * rx_ring->xdp_ring assignment made during XDP ring initialization.
*/
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
- struct ice_vsi *vsi = ring->vsi;
- u16 qid;
+ struct ice_tx_ring *ring;
- qid = ring->q_index - vsi->alloc_txq;
+ ring = vsi->rx_rings[qid]->xdp_ring;
+ if (!ring)
+ return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+ ring->xsk_pool = NULL;
+ return;
+ }
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 85a94483c2ed..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a830f7f9aed0..0c4ec9264071 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1986,8 +1986,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- ice_for_each_xdp_txq(vsi, i)
- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
return ret;
}
@@ -3181,7 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
ice_vsi_init_vlan_ops(vsi);
@@ -4062,7 +4062,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
if (err && err != -EEXIST)
return err;
- return 0;
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so the filter isn't left by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index eb40526ee179..173fe6c31341 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
promisc_m, 0);
}
+ if (status && status != -EEXIST)
+ return status;
- return status;
+ return 0;
}
/**
@@ -2579,7 +2581,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2587,13 +2588,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
}
}
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key))
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- else
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
- }
-
return 0;
free_xdp_rings:
@@ -2683,6 +2677,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
xdp_rings_rem -= xdp_rings_per_v;
}
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key)) {
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ } else {
+ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx) {
+ if (ice_ring_is_xdp(ring)) {
+ vsi->rx_rings[i]->xdp_ring = ring;
+ break;
+ }
+ }
+ }
+ ice_tx_xsk_pool(vsi, i);
+ }
+
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
@@ -3573,6 +3584,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Make sure VLAN delete is successful before updating VLAN
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index fce204693dbb..3808034f7e7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4445,6 +4445,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4452,7 +4459,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
@@ -4971,7 +4978,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+ bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
/* For each profile we are going to associate the recipe with, add the
* recipes that are associated with that profile. This will give us
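bitmap_set(map, 0, nbits) and bitmap_fill(map, nbits) are equivalent when the range covers the whole map; the replacement simply states the intent directly. Side by side, as a sketch:

    #include <linux/bitmap.h>

    static void bitmap_sketch(void)
    {
            DECLARE_BITMAP(possible, 48);           /* 48 is illustrative */

            bitmap_zero(possible, 48);              /* all bits clear */
            bitmap_set(possible, 0, 48);            /* set a range from bit 0 */
            bitmap_fill(possible, 48);              /* same effect, clearer */
    }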
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8fd7c3e37f5e..0abeed092de1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
- if (WARN_ON(!vsi))
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
return -EINVAL;
+ }
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -762,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops;
- int err;
+ int err = 0;
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_tx_filtering(vsi);
- if (err)
- return err;
+ /* Allow a VF that has only VLAN 0 to send all tagged traffic */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
return ice_cfg_mac_antispoof(vsi, true);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 094e3c97a1ea..2b4c791b6cba 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2288,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN filtering on first non-zero VLAN */
if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2333,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
/* Disable VLAN filtering when only VLAN 0 is left */
- if (!ice_vsi_has_non_zero_vlans(vsi))
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2838,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2853,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
- ice_vf_dis_vlan_promisc(vsi, &vlan);
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
}
}
@@ -2931,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (err)
return err;
}
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2946,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
- if (err)
- return err;
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 49ba8bfdbf04..e48e29258450 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -329,6 +329,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
!is_power_of_2(vsi->tx_rings[qid]->count)) {
netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
@@ -353,7 +359,7 @@ xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
if (!ret && pool_present)
- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
@@ -944,13 +950,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (!ice_is_xdp_ena_vsi(vsi))
return -EINVAL;
- if (queue_id >= vsi->num_txq)
+ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
return -EINVAL;
- if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -EINVAL;
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
- ring = vsi->xdp_rings[queue_id];
+ if (!ring->xsk_pool)
+ return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
* it will run again. If not, trigger an interrupt and
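The truncated comment describes the usual XSK wakeup idiom: if NAPI is already scheduled, mark it missed so it reruns; if it was idle, kick an interrupt so the poll loop starts. A hedged sketch of an ndo_xsk_wakeup body (the interrupt trigger is a stand-in for the driver's own helper):

    if (!napi_if_scheduled_mark_missed(&q_vector->napi)) {
            /* NAPI was idle: force an interrupt so the poll loop runs. */
            trigger_sw_interrupt(q_vector);         /* hypothetical helper */
    }
    return 0;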
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d8b836a85cc3..2796e81d2726 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to serialize access to VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -7958,8 +7965,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7973,6 +7982,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9f06896a049b..f8605f57bd06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1214,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1249,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1293,6 +1280,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1318,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 5edb68a8aab1..57f27cc7724e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
if (!ch->rx_buff[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
@@ -239,6 +240,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
}
skb = build_skb(buf, priv->rx_skb_size);
+ if (!skb) {
+ skb_free_frag(buf);
+ net_dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
@@ -288,7 +295,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
continue;
if (ret != XRX200_DMA_PACKET_COMPLETE)
- return ret;
+ break;
rx++;
} else {
break;
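Both hunks harden the RX path: the refill helper now restores the old buffer when allocation fails instead of leaving a NULL slot in the ring, and a failed build_skb() frees the fragment rather than being dereferenced. The refill pattern, distilled with hypothetical names:

    /* Sketch: refill slot idx, preserving the old buffer on failure. */
    static int rx_refill_sketch(void **ring, int idx, unsigned int size)
    {
            void *old = ring[idx];

            ring[idx] = frag_alloc(size);   /* overwrite first, as the driver does */
            if (!ring[idx]) {
                    ring[idx] = old;        /* no hole in the ring; drop the packet */
                    return -ENOMEM;
            }
            return 0;
    }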
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 6809b8b4c556..7282a826d81e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2580,6 +2580,12 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
+ * entries, check and free the MCAM entries explicitly to avoid a leak.
+ * Since the LF is detached, pass -1 as the LF number.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 583ead4dd246..1e348fd0d930 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1097,6 +1097,9 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
/* Delete multicast and promisc MCAM entries */
@@ -1136,6 +1139,9 @@ bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bo
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
/* Enables only broadcast match entry. Promisc/Allmulti are enabled
* in set_rx_mode mbox handler.
*/
@@ -1675,7 +1681,7 @@ static void npc_load_kpu_profile(struct rvu *rvu)
* Firmware database method.
* Default KPU profile.
*/
- if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
kpu_profile);
rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
@@ -1939,6 +1945,7 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
u64 nibble_ena, rx_kex, tx_kex;
@@ -1951,15 +1958,15 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
if (nibble_ena) {
tx_kex &= ~NPC_PARSE_NIBBLE;
tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
}
/* Configure RX interfaces */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index a400aa22da79..7c4e1acd0f77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -467,7 +467,8 @@ do { \
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
- NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* SMAC follows the DMAC (which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index fb8db5888d2f..d686c7b6252f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -632,6 +632,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -641,11 +647,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
-
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
@@ -1591,6 +1598,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+
+ pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e795f9ee76dd..b28029cc4316 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -195,6 +195,7 @@ struct otx2_hw {
u16 sqb_size;
/* NIX */
+ u8 txschq_link_cfg_lvl;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d9426b01f462..5ace4609de47 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1732,7 +1732,7 @@ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
case XDP_TX: {
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
- if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
act = XDP_DROP;
break;
@@ -1891,10 +1891,19 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
- else
+ } else {
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd4;
+ }
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1902,16 +1911,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- }
-
reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb,
- trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+ mtk_ppe_check_skb(eth->ppe, skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 7405c97cda66..ecf85e9ed824 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -314,6 +314,11 @@
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_SPECIAL_TAG BIT(22)
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
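With the rxd5 masks defined, the v2 RX path can pull the FOE entry and source port out of the descriptor word; the driver masks in place, but the bitfield helpers express the same extraction. A sketch (the descriptor word is passed in for illustration):

    #include <linux/bitfield.h>

    static void rxd5_sketch(u32 rxd5)
    {
            u32 foe  = FIELD_GET(MTK_RXD5_FOE_ENTRY, rxd5); /* bits 14:0 */
            u32 port = FIELD_GET(MTK_RXD5_SRC_PORT, rxd5);  /* bits 29:26 */

            /* an all-ones FOE field means "no entry" in the RX path above */
            (void)foe;
            (void)port;
    }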
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 42c96c9d7fb1..dcb9eb1899ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
- dev->caps.num_ports);
+ (unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 37522352e4b2..c8e5ca65bb6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -79,6 +79,10 @@ tc_act_police_offload(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
int err = 0;
+ err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ if (err)
+ return err;
+
err = fill_meter_params_from_act(act, &params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index d87bbb0be7c8..e6f64d890fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -506,7 +506,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int err;
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
@@ -620,7 +620,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 6b6c7044b64a..3a1f76eac542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -246,7 +246,7 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
- struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_ktls_offload_context_tx *obj, *n;
struct mlx5e_async_ctx *bulk_async;
int i;
@@ -255,7 +255,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
return;
i = 0;
- list_for_each_entry(obj, list, list_node) {
+ list_for_each_entry_safe(obj, n, list, list_node) {
mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
i++;
}
@@ -808,6 +808,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
int datalen;
u32 seq;
@@ -819,7 +820,12 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
mlx5e_tx_mpwqe_ensure_complete(sq);
tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't WARN on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather, continue processing this packet.
+ */
+ if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
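The first hunk here switches to the _safe iterator because mlx5e_tls_priv_tx_cleanup() can free the entry being visited, after which the plain iterator would chase a dangling ->next. The idiom, sketched with illustrative types:

    #include <linux/list.h>

    /* Sketch: "n" caches the next node before "obj" may be freed. */
    struct item *obj, *n;

    list_for_each_entry_safe(obj, n, &head, list_node) {
            cleanup(obj);   /* may list_del() and free obj -- iteration stays safe */
    }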
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index e2a9b9be5c1f..e0ce5a233d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1395,10 +1395,11 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
}
return fs;
-err_free_fs:
- kvfree(fs);
+
err_free_vlan:
mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
err:
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d858667736a3..02eb2f0fa2ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3682,7 +3682,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
+ MLX5_TC_FLAG(NIC_OFFLOAD);
+ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -4769,14 +4771,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
- /* HW LRO */
- if (MLX5_CAP_ETH(mdev, lro_cap) &&
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- /* No XSK params: checking the availability of striding RQ in general. */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->packet_merge.type = slow_pci_heuristic(mdev) ?
- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
- }
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 4c1599de652c..759f7d3c2cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -662,6 +662,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -696,6 +698,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -708,12 +717,21 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
- return mlx5e_init_rep(mdev, netdev);
+ mlx5e_build_rep_params(netdev);
+ mlx5e_timestamp_init(priv);
+ return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
@@ -836,13 +854,6 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
- if (!priv->fs) {
- netdev_err(priv->netdev, "FS allocation failed\n");
- return -ENOMEM;
- }
-
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ed73132129aa..a9f4c652f859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -427,7 +427,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (mlx5_lag_mpesw_is_activated(esw->dev))
+ if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
+ mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
@@ -3115,8 +3116,10 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
+ if (err) {
+ devl_unlock(devlink);
return;
+ }
}
esw->esw_funcs.num_vfs = new_num_vfs;
devl_unlock(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 735dc805dad7..e735e19461ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,10 +50,12 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
- ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+ int max_fte = ft_attr->max_fte;
+
+ ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
return 0;
}
@@ -258,7 +260,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -267,17 +269,19 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ unsigned int size;
int err;
- if (size != POOL_NEXT_SIZE)
- size = roundup_pow_of_two(size);
- size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+ if (ft_attr->max_fte != POOL_NEXT_SIZE)
+ size = roundup_pow_of_two(ft_attr->max_fte);
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 274004e80f03..8ef4254b9ea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f1b908d40fa6..e3960cdf5131 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1155,7 +1155,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
- err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
+ err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
if (err)
goto free_ft;
@@ -1195,6 +1195,12 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_flow_table);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
+{
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_flow_table_id);
+
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr, u16 vport)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 0f34e3c80d1f..065102278cb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1067,30 +1067,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
+ unsigned long flags;
if (fn >= ldev->ports)
return;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
for (i = 0; i < ldev->ports; i++) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
}
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -1234,7 +1236,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
+ if (!ldev->pf[i].netdev)
break;
if (i >= ldev->ports)
@@ -1246,12 +1248,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1260,12 +1263,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1274,13 +1278,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev) &&
dev == ldev->pf[MLX5_LAG_P1].dev;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1289,12 +1294,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1303,13 +1309,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev) &&
test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1352,9 +1359,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -1373,7 +1381,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return ndev;
}
@@ -1383,10 +1391,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
u8 port = 0;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -1401,7 +1410,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
port = ldev->v2p_map[port * ldev->buckets];
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1422,8 +1431,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -1433,7 +1443,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
ldev->pf[MLX5_LAG_P1].dev;
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1446,6 +1456,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
struct mlx5_lag *ldev;
+ unsigned long flags;
int num_ports;
int ret, i, j;
void *out;
@@ -1462,7 +1473,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
@@ -1472,7 +1483,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
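
The lag.c hunks above convert every plain spin_lock(&lag_lock) to the IRQ-saving variant so the lock is safe to take from contexts that run with interrupts disabled. A minimal sketch of the pattern, with illustrative names (example_lock and shared_counter are not from the driver):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int shared_counter;

static void example_update(int delta)
{
	unsigned long flags;

	/* Save the caller's interrupt state and disable local IRQs while
	 * the lock is held; restoring 'flags' afterwards makes the helper
	 * callable from any context without deadlocking on the lock.
	 */
	spin_lock_irqsave(&example_lock, flags);
	shared_counter += delta;
	spin_unlock_irqrestore(&example_lock, flags);
}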
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bec8d6d0b5f6..c085b031abfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1530,7 +1530,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
+ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1597,6 +1599,7 @@ err_timeout_init:
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
return err;
}
@@ -1618,6 +1621,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
}
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
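
The main.c hunk gives each device instance its own lockdep class for intf_state_mutex by registering a per-device key. A minimal sketch of that pattern, assuming a hypothetical example_dev structure:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct example_dev {
	struct lock_class_key lock_key;
	struct mutex state_mutex;
};

static void example_dev_init(struct example_dev *dev)
{
	/* A dynamic key per instance keeps lockdep from treating nested
	 * locking of two different devices as recursion on one class.
	 */
	lockdep_register_key(&dev->lock_key);
	mutex_init(&dev->state_mutex);
	lockdep_set_class(&dev->state_mutex, &dev->lock_key);
}

static void example_dev_uninit(struct example_dev *dev)
{
	mutex_destroy(&dev->state_mutex);
	lockdep_unregister_key(&dev->lock_key);	/* after last use of the key */
}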
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index ec76a8b1acc1..60596357bfc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -376,8 +376,8 @@ retry:
goto out_dropped;
}
}
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_dropped;
@@ -524,10 +524,13 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
dev->priv.reclaim_pages_discard += npages;
}
/* if triggered by FW event and failed by FW then ignore */
- if (event && err == -EREMOTEIO)
+ if (event && err == -EREMOTEIO) {
err = 0;
+ goto out_free;
+ }
+
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
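
Both pagealloc.c hunks make the same change: mlx5_cmd_check() now runs unconditionally, so a firmware status carried in the output mailbox is translated into an errno even when the transport call itself returned 0. A rough sketch of that ordering, with hypothetical helpers standing in for the mlx5 internals:

#include <linux/device.h>

struct example_dev { struct device *dev; };

static int cmd_exec(struct example_dev *d, void *in, void *out);	/* hypothetical */
static int cmd_status_to_errno(struct example_dev *d, int err,
			       void *in, void *out);			/* hypothetical */

static int example_exec_checked(struct example_dev *d, void *in, void *out)
{
	int err = cmd_exec(d, in, out);

	/* Translate the firmware status in 'out' even when the transport
	 * succeeded, so a firmware-level failure is not reported as 0.
	 */
	err = cmd_status_to_errno(d, err, in, out);
	if (err)
		dev_warn(d->dev, "command failed, err %d\n", err);
	return err;
}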
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index ee2e1b7c1310..c0e6c487c63c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -159,11 +159,11 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
- devl_unlock(devlink);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
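
The sriov.c fix moves devl_unlock() ahead of the error return: the early 'return err' used to leave the devlink instance lock held. The corrected shape, sketched with a generic mutex and hypothetical helpers:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int do_enable(int arg);		/* hypothetical */
static int do_followup(int arg);	/* hypothetical */

static int example_enable(int arg)
{
	int err;

	mutex_lock(&example_lock);
	err = do_enable(arg);
	mutex_unlock(&example_lock);	/* dropped on every path */
	if (err)
		return err;		/* previously returned with the lock held */

	return do_followup(arg);
}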
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 223c8741b7ae..16d65fe4f654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -439,6 +439,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
+ MLX5_SET(create_flow_table_in, in, uid, attr->uid);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e5f6412baea9..31d443dd8386 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -214,7 +214,7 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
tbl->table_type);
}
-static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid)
{
bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
@@ -236,6 +236,7 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
ft_attr.sw_owner = true;
ft_attr.decap_en = en_decap;
ft_attr.reformat_en = en_encap;
+ ft_attr.uid = uid;
ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
NULL, &tbl->table_id);
@@ -243,7 +244,8 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
return ret;
}
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level,
+ u32 flags, u16 uid)
{
struct mlx5dr_table *tbl;
int ret;
@@ -263,7 +265,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret)
goto free_tbl;
- ret = dr_table_create_sw_owned_tbl(tbl);
+ ret = dr_table_create_sw_owned_tbl(tbl, uid);
if (ret)
goto uninit_tbl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index feed227944b7..062c7c74a1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1217,6 +1217,7 @@ struct mlx5dr_cmd_query_flow_table_details {
struct mlx5dr_cmd_create_flow_table_attr {
u32 table_type;
+ u16 uid;
u64 icm_addr_rx;
u64 icm_addr_tx;
u8 level;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 75672663359d..13b6d4721e17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -62,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
@@ -71,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
- size,
+ ft_attr,
next_ft);
flags = ft->flags;
/* turn off encap/decap if not supported for sw-str by fw */
@@ -79,7 +79,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
+ tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
+ ft_attr->uid);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index ecffc2cbe6fe..226a0d7bb06d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -51,7 +51,8 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
+ u16 uid);
struct mlx5dr_table *
mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9bf584234a6..bb1cd4bae82e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -328,7 +328,6 @@ static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
u8 last_module = max_ports;
int i;
int err;
@@ -357,7 +356,6 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
}
/* Create port objects for each valid entry */
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
err = mlxsw_m_port_create(mlxsw_m,
@@ -367,7 +365,6 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
goto err_module_to_port_create;
}
}
- devl_unlock(devlink);
return 0;
@@ -377,7 +374,6 @@ err_module_to_port_create:
mlxsw_m_port_remove(mlxsw_m,
mlxsw_m->module_to_port[i]);
}
- devl_unlock(devlink);
i = max_ports;
err_module_to_port_map:
for (i--; i > 0; i--)
@@ -390,10 +386,8 @@ err_module_to_port_alloc:
static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
int i;
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
mlxsw_m_port_remove(mlxsw_m,
@@ -401,7 +395,6 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
mlxsw_m_port_module_unmap(mlxsw_m, i);
}
}
- devl_unlock(devlink);
kfree(mlxsw_m->module_to_port);
kfree(mlxsw_m->ports);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1e240cdd9cbd..30c7b0e15721 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1897,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 2e0b704b8a31..7b01b9c20722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,6 +46,7 @@ struct mlxsw_sp2_ptp_state {
* enabled.
*/
struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
};
struct mlxsw_sp1_ptp_key {
@@ -1374,6 +1375,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
goto err_ptp_traps_set;
refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
return &ptp_state->common;
err_ptp_traps_set:
@@ -1388,6 +1390,7 @@ void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ mutex_destroy(&ptp_state->lock);
mlxsw_sp_ptp_traps_unset(mlxsw_sp);
kfree(ptp_state);
}
@@ -1461,7 +1464,10 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
*config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
return 0;
}
@@ -1523,6 +1529,9 @@ mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
return -EINVAL;
}
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
*p_ing_types = ing_types;
*p_egr_types = egr_types;
return 0;
@@ -1574,8 +1583,6 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
@@ -1597,8 +1604,6 @@ static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
@@ -1618,16 +1623,20 @@ err_ptp_disable:
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config)
{
+ struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
&new_egr_types, &rx_filter);
if (err)
- return err;
+ goto err_get_message_types;
new_config.flags = config->flags;
new_config.tx_type = config->tx_type;
@@ -1640,11 +1649,11 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
new_egr_types, new_config);
if (err)
- return err;
+ goto err_configure_port;
} else if (!new_ing_types && !new_egr_types && ptp_enabled) {
err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
if (err)
- return err;
+ goto err_deconfigure_port;
}
mlxsw_sp_port->ptp.ing_types = new_ing_types;
@@ -1652,8 +1661,15 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
/* Notify the ioctl caller what we are actually timestamping. */
config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
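
With the RTNL assertions gone, the new ptp_state->lock mutex is what serializes readers and writers of 'config', and the early returns in mlxsw_sp2_ptp_hwtstamp_set() are rewritten as a single unlock path. A condensed sketch of that lock-and-unwind shape, with illustrative names:

#include <linux/mutex.h>

struct example_state {
	struct mutex lock;	/* protects cfg and the HW programming */
	int cfg;
};

static int parse_cfg(int in, int *out);	/* hypothetical */
static int program_hw(int cfg);		/* hypothetical */

static int example_cfg_set(struct example_state *st, int in)
{
	int cfg, err;

	mutex_lock(&st->lock);

	err = parse_cfg(in, &cfg);
	if (err)
		goto out_unlock;

	err = program_hw(cfg);
	if (err)
		goto out_unlock;

	st->cfg = cfg;	/* cached copy stays in sync with the HW */

out_unlock:
	mutex_unlock(&st->lock);
	return err;
}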
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 2d1628fdefc1..a8b88230959a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -171,10 +171,11 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
@@ -231,10 +232,11 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
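
The spectrum_ptp.h hunks add the 'static inline' these header-defined stubs were missing; without it, every includer emits its own external definition and the build fails with multiple-definition link errors when the PTP code is compiled out. The general pattern, sketched for a hypothetical feature:

#include <linux/errno.h>

struct example;

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
int example_feature_do(struct example *e);	/* real definition in a .c file */
#else
/* Header stubs must be 'static inline' so each translation unit gets
 * a local, zero-cost copy instead of a clashing external symbol.
 */
static inline int example_feature_do(struct example *e)
{
	return -EOPNOTSUPP;
}
#endif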
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1d6e3b641b2e..d928b75f3780 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -710,7 +710,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
@@ -718,10 +718,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (lan966x->fdma)
devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
- if (lan966x->ptp_irq)
+ if (lan966x->ptp_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
- if (lan966x->ptp_ext_irq)
+ if (lan966x->ptp_ext_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
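
The lan966x changes, in the cleanup path above and in the probe hunk that follows, check the result of platform_get_irq_byname() with '> 0': the call returns a negative errno on failure, so testing the raw value for truth mistakes an error code for a valid interrupt. A minimal sketch, assuming an optional interrupt named "ana":

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request_optional_irq(struct platform_device *pdev, void *priv)
{
	int irq = platform_get_irq_byname(pdev, "ana");

	/* Negative means error/absent; only a positive value is a
	 * usable Linux IRQ number.
	 */
	if (irq > 0)
		return devm_request_threaded_irq(&pdev->dev, irq, NULL,
						 example_irq, IRQF_ONESHOT,
						 "example", priv);
	return 0;
}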
@@ -1049,7 +1049,7 @@ static int lan966x_probe(struct platform_device *pdev)
}
lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
lan966x_ana_irq_handler, IRQF_ONESHOT,
"ana irq", lan966x);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a3214a762e4b..9e57d23e57bf 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
struct sockaddr *address = addr;
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
@@ -74,11 +71,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +139,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +164,6 @@ static int moxart_mac_open(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&priv->napi);
moxart_mac_reset(ndev);
@@ -193,6 +182,7 @@ static int moxart_mac_open(struct net_device *ndev)
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +194,11 @@ static int moxart_mac_stop(struct net_device *ndev)
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +235,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +289,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +353,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +374,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +483,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->base_addr = res->start;
+ ret = platform_get_ethdev_address(p_dev, ndev);
+ if (ret == -EPROBE_DEFER)
+ goto init_fail;
+ if (ret)
+ eth_hw_addr_random(ndev);
+ moxart_update_mac_address(ndev);
+
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -501,7 +503,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
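
Throughout the moxart diff, DMA API calls are switched from &ndev->dev to &priv->pdev->dev: the net_device embeds a virtual device with no DMA configuration, so buffers must be mapped, checked, synced and unmapped against the platform device that actually masters the bus. A minimal sketch with hypothetical fields:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

struct example_priv {
	struct platform_device *pdev;
	void *rx_buf;
	dma_addr_t rx_map;
	size_t rx_len;
};

static int example_map_rx(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);
	struct device *dma_dev = &priv->pdev->dev;	/* the DMA-capable device */

	priv->rx_map = dma_map_single(dma_dev, priv->rx_buf,
				      priv->rx_len, DMA_FROM_DEVICE);
	/* Check (and later unmap) against the same device used to map. */
	if (dma_mapping_error(dma_dev, priv->rx_map))
		return -ENOMEM;

	return 0;
}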
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d4649e4ee0e7..306026e6aa11 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1860,16 +1860,20 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (i = 0; i < ocelot->num_stats; i++)
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
+ }
}
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * ocelot->num_stats;
+ unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int err, j;
@@ -1877,9 +1881,8 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
list_for_each_entry(region, &ocelot->stats_regions, node) {
- err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- region->offset, region->buf,
- region->count);
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
if (err)
return err;
@@ -1906,13 +1909,13 @@ static void ocelot_check_stats_work(struct work_struct *work)
stats_work);
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
err = ocelot_port_update_stats(ocelot, i);
if (err)
break;
}
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1925,16 +1928,22 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
/* check and update now */
err = ocelot_port_update_stats(ocelot, port);
- /* Copy all counters */
- for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port * ocelot->num_stats + i];
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1943,10 +1952,16 @@ EXPORT_SYMBOL(ocelot_get_ethtool_stats);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
+ int i, num_stats = 0;
+
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return ocelot->num_stats;
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
}
EXPORT_SYMBOL(ocelot_get_sset_count);
@@ -1958,8 +1973,11 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->stats_regions);
- for (i = 0; i < ocelot->num_stats; i++) {
- if (region && ocelot->stats_layout[i].offset == last + 1) {
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -1967,12 +1985,12 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!region)
return -ENOMEM;
- region->offset = ocelot->stats_layout[i].offset;
+ region->base = ocelot->stats_layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot->stats_layout[i].offset;
+ last = ocelot->stats_layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
@@ -3340,7 +3358,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- const struct ocelot_stat_layout *stat;
char queue_name[32];
int i, ret;
u32 port;
@@ -3353,17 +3370,13 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->num_stats = 0;
- for_each_stat(ocelot, stat)
- ocelot->num_stats++;
-
ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * ocelot->num_stats,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
sizeof(u64), GFP_KERNEL);
if (!ocelot->stats)
return -ENOMEM;
- mutex_init(&ocelot->stats_lock);
+ spin_lock_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
@@ -3511,7 +3524,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
destroy_workqueue(ocelot->owq);
- mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);
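
The ocelot.c rework replaces the variable-length, offset-based stats array with a fixed OCELOT_NUM_STATS table indexed by enum, where counters a given switch lacks simply keep a zeroed entry; every consumer skips slots whose name is empty. A compact sketch of that hole-skipping layout, with illustrative names:

#include <linux/ethtool.h>

enum { STAT_RX_OCTETS, STAT_TX_OCTETS, NUM_STATS };

struct stat_layout {
	char name[ETH_GSTRING_LEN];
	u32 reg;
};

/* Slots left out of the initializer have name[0] == '\0' and count
 * as "not supported on this switch".
 */
static const struct stat_layout layout[NUM_STATS] = {
	[STAT_RX_OCTETS] = { .name = "rx_octets", .reg = 0x00 },
};

static int count_supported(void)
{
	int i, n = 0;

	for (i = 0; i < NUM_STATS; i++)
		if (layout[i].name[0] != '\0')
			n++;
	return n;
}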
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 5e6136e80282..330d30841cdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -725,37 +725,42 @@ static void ocelot_get_stats64(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
+ u64 *s;
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
- SYS_STAT_CFG);
+ spin_lock(&ocelot->stats_lock);
+
+ s = &ocelot->stats[port * OCELOT_NUM_STATS];
/* Get Rx stats */
- stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
- stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
- ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
- ocelot_read(ocelot, SYS_COUNT_RX_64) +
- ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
- ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
- ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
- stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
stats->rx_dropped = dev->stats.rx_dropped;
/* Get Tx stats */
- stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
- stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
- ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
- ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
- ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
- stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
- ocelot_read(ocelot, SYS_COUNT_TX_AGING);
- stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 961f803aca19..9c488953f541 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -96,101 +96,379 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
- { .name = "rx_octets", .offset = 0x00, },
- { .name = "rx_unicast", .offset = 0x01, },
- { .name = "rx_multicast", .offset = 0x02, },
- { .name = "rx_broadcast", .offset = 0x03, },
- { .name = "rx_shorts", .offset = 0x04, },
- { .name = "rx_fragments", .offset = 0x05, },
- { .name = "rx_jabbers", .offset = 0x06, },
- { .name = "rx_crc_align_errs", .offset = 0x07, },
- { .name = "rx_sym_errs", .offset = 0x08, },
- { .name = "rx_frames_below_65_octets", .offset = 0x09, },
- { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
- { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
- { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
- { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
- { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
- { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
- { .name = "rx_pause", .offset = 0x10, },
- { .name = "rx_control", .offset = 0x11, },
- { .name = "rx_longs", .offset = 0x12, },
- { .name = "rx_classified_drops", .offset = 0x13, },
- { .name = "rx_red_prio_0", .offset = 0x14, },
- { .name = "rx_red_prio_1", .offset = 0x15, },
- { .name = "rx_red_prio_2", .offset = 0x16, },
- { .name = "rx_red_prio_3", .offset = 0x17, },
- { .name = "rx_red_prio_4", .offset = 0x18, },
- { .name = "rx_red_prio_5", .offset = 0x19, },
- { .name = "rx_red_prio_6", .offset = 0x1A, },
- { .name = "rx_red_prio_7", .offset = 0x1B, },
- { .name = "rx_yellow_prio_0", .offset = 0x1C, },
- { .name = "rx_yellow_prio_1", .offset = 0x1D, },
- { .name = "rx_yellow_prio_2", .offset = 0x1E, },
- { .name = "rx_yellow_prio_3", .offset = 0x1F, },
- { .name = "rx_yellow_prio_4", .offset = 0x20, },
- { .name = "rx_yellow_prio_5", .offset = 0x21, },
- { .name = "rx_yellow_prio_6", .offset = 0x22, },
- { .name = "rx_yellow_prio_7", .offset = 0x23, },
- { .name = "rx_green_prio_0", .offset = 0x24, },
- { .name = "rx_green_prio_1", .offset = 0x25, },
- { .name = "rx_green_prio_2", .offset = 0x26, },
- { .name = "rx_green_prio_3", .offset = 0x27, },
- { .name = "rx_green_prio_4", .offset = 0x28, },
- { .name = "rx_green_prio_5", .offset = 0x29, },
- { .name = "rx_green_prio_6", .offset = 0x2A, },
- { .name = "rx_green_prio_7", .offset = 0x2B, },
- { .name = "tx_octets", .offset = 0x40, },
- { .name = "tx_unicast", .offset = 0x41, },
- { .name = "tx_multicast", .offset = 0x42, },
- { .name = "tx_broadcast", .offset = 0x43, },
- { .name = "tx_collision", .offset = 0x44, },
- { .name = "tx_drops", .offset = 0x45, },
- { .name = "tx_pause", .offset = 0x46, },
- { .name = "tx_frames_below_65_octets", .offset = 0x47, },
- { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
- { .name = "tx_frames_128_255_octets", .offset = 0x49, },
- { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
- { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
- { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
- { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
- { .name = "tx_yellow_prio_0", .offset = 0x4E, },
- { .name = "tx_yellow_prio_1", .offset = 0x4F, },
- { .name = "tx_yellow_prio_2", .offset = 0x50, },
- { .name = "tx_yellow_prio_3", .offset = 0x51, },
- { .name = "tx_yellow_prio_4", .offset = 0x52, },
- { .name = "tx_yellow_prio_5", .offset = 0x53, },
- { .name = "tx_yellow_prio_6", .offset = 0x54, },
- { .name = "tx_yellow_prio_7", .offset = 0x55, },
- { .name = "tx_green_prio_0", .offset = 0x56, },
- { .name = "tx_green_prio_1", .offset = 0x57, },
- { .name = "tx_green_prio_2", .offset = 0x58, },
- { .name = "tx_green_prio_3", .offset = 0x59, },
- { .name = "tx_green_prio_4", .offset = 0x5A, },
- { .name = "tx_green_prio_5", .offset = 0x5B, },
- { .name = "tx_green_prio_6", .offset = 0x5C, },
- { .name = "tx_green_prio_7", .offset = 0x5D, },
- { .name = "tx_aged", .offset = 0x5E, },
- { .name = "drop_local", .offset = 0x80, },
- { .name = "drop_tail", .offset = 0x81, },
- { .name = "drop_yellow_prio_0", .offset = 0x82, },
- { .name = "drop_yellow_prio_1", .offset = 0x83, },
- { .name = "drop_yellow_prio_2", .offset = 0x84, },
- { .name = "drop_yellow_prio_3", .offset = 0x85, },
- { .name = "drop_yellow_prio_4", .offset = 0x86, },
- { .name = "drop_yellow_prio_5", .offset = 0x87, },
- { .name = "drop_yellow_prio_6", .offset = 0x88, },
- { .name = "drop_yellow_prio_7", .offset = 0x89, },
- { .name = "drop_green_prio_0", .offset = 0x8A, },
- { .name = "drop_green_prio_1", .offset = 0x8B, },
- { .name = "drop_green_prio_2", .offset = 0x8C, },
- { .name = "drop_green_prio_3", .offset = 0x8D, },
- { .name = "drop_green_prio_4", .offset = 0x8E, },
- { .name = "drop_green_prio_5", .offset = 0x8F, },
- { .name = "drop_green_prio_6", .offset = 0x90, },
- { .name = "drop_green_prio_7", .offset = 0x91, },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static void ocelot_pll5_init(struct ocelot *ocelot)
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index c2af4eb8ca5d..9cf82ecf191c 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+	REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+	REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index c922dfab8080..eeb1455a4e5d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1395,6 +1395,8 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
+ /* update port state to get latest interface */
+ set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 34c0d2ddf9ef..a8286d0032d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
}
/* Adjust the start address to be cache size aligned */
- cache->id = id;
cache->addr = addr & ~(u64)(cache->size - 1);
/* Re-init to the new ID and address */
@@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
return NULL;
}
+ cache->id = id;
+
exit:
/* Adjust offset */
*offset = addr - cache->addr;
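
The nfp_cppcore.c change commits cache->id only after the area is successfully re-acquired, so a failed acquire can no longer leave the cache entry advertising an ID it does not actually map. The update-state-only-on-success rule, sketched with illustrative names:

#include <linux/types.h>

struct example_cache {
	u32 id;
	u64 addr;
};

static int acquire_area(struct example_cache *c);	/* hypothetical */

static int example_cache_get(struct example_cache *c, u32 id, u64 addr)
{
	int err;

	c->addr = addr;

	err = acquire_area(c);
	if (err)
		return err;	/* c->id still describes the old mapping */

	c->id = id;		/* commit the new ID only after success */
	return 0;
}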
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 1443f788ee37..0be79c516781 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1564,8 +1564,67 @@ static int ionic_set_features(struct net_device *netdev,
return err;
}
+static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+
+ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_getattr = {
+ .opcode = IONIC_CMD_LIF_GETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
+ return 0;
+}
+
+static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
+{
+ u8 get_mac[ETH_ALEN];
+ int err;
+
+ err = ionic_set_attr_mac(lif, mac);
+ if (err)
+ return err;
+
+ err = ionic_get_attr_mac(lif, get_mac);
+ if (err)
+ return err;
+
+	/* Deal with older firmware that silently ignores the set attr mac:
+	 * it doesn't actually change the mac and doesn't return an error,
+	 * so do a get attr afterwards to verify whether the set took effect.
+	 */
+ if (!ether_addr_equal(get_mac, mac))
+ return 1;
+
+ return 0;
+}
+
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
+ struct ionic_lif *lif = netdev_priv(netdev);
struct sockaddr *addr = sa;
u8 *mac;
int err;
@@ -1574,6 +1633,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
if (ether_addr_equal(netdev->dev_addr, mac))
return 0;
+ err = ionic_program_mac(lif, mac);
+ if (err < 0)
+ return err;
+
+ if (err > 0)
+		netdev_dbg(netdev, "%s: SET and GET ATTR MAC are not equal - due to old FW running\n",
+ __func__);
+
err = eth_prepare_mac_addr_change(netdev, addr);
if (err)
return err;
@@ -2963,6 +3030,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
+ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+ dev_info(ionic->dev, "FW Up: clearing broken state\n");
+
err = ionic_qcqs_alloc(lif);
if (err)
goto err_unlock;
@@ -3169,6 +3239,7 @@ static int ionic_station_set(struct ionic_lif *lif)
.attr = IONIC_LIF_ATTR_MAC,
},
};
+ u8 mac_address[ETH_ALEN];
struct sockaddr addr;
int err;
@@ -3177,8 +3248,23 @@ static int ionic_station_set(struct ionic_lif *lif)
return err;
netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
ctx.comp.lif_getattr.mac);
- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
- return 0;
+ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
+
+ if (is_zero_ether_addr(mac_address)) {
+ eth_hw_addr_random(netdev);
+		netdev_dbg(netdev, "Random MAC generated: %pM\n", netdev->dev_addr);
+ ether_addr_copy(mac_address, netdev->dev_addr);
+
+ err = ionic_program_mac(lif, mac_address);
+ if (err < 0)
+ return err;
+
+ if (err > 0) {
+			netdev_dbg(netdev, "%s: SET/GET ATTR MAC are not the same - due to old FW running\n",
+ __func__);
+ return 0;
+ }
+ }
if (!is_zero_ether_addr(netdev->dev_addr)) {
/* If the netdev mac is non-zero and doesn't match the default
@@ -3186,12 +3272,11 @@ static int ionic_station_set(struct ionic_lif *lif)
* likely here again after a fw-upgrade reset. We need to be
* sure the netdev mac is in our filter list.
*/
- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
- netdev->dev_addr))
+ if (!ether_addr_equal(mac_address, netdev->dev_addr))
ionic_lif_addr_add(lif, netdev->dev_addr);
} else {
/* Update the netdev mac with the device's mac */
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ ether_addr_copy(addr.sa_data, mac_address);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
if (err) {
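
ionic_program_mac() exists because some older firmware acknowledges IONIC_CMD_LIF_SETATTR without applying it, so the driver reads the MAC back and compares. The verify-after-set idiom in isolation, with hypothetical helpers:

#include <linux/etherdevice.h>

static int hw_set_mac(const u8 *mac);	/* hypothetical */
static int hw_get_mac(u8 *mac);		/* hypothetical */

/* Returns 0 on success, <0 on error, >0 if the device silently
 * ignored the set (old firmware), leaving the decision to the caller.
 */
static int example_program_mac(const u8 *mac)
{
	u8 readback[ETH_ALEN];
	int err;

	err = hw_set_mac(mac);
	if (err)
		return err;

	err = hw_get_mac(readback);
	if (err)
		return err;

	return ether_addr_equal(readback, mac) ? 0 : 1;
}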
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 4029b4e021f8..56f93b030551 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -474,8 +474,8 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
- msleep(1000);
iowrite32(0, &idev->dev_cmd_regs->done);
+ msleep(1000);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
goto try_again;
}
@@ -488,6 +488,8 @@ try_again:
return ionic_error_to_errno(err);
}
+ ionic_dev_cmd_clean(ionic);
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 52f9ed8db9c9..4f2b82a884b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -1134,6 +1134,7 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
pcim_iounmap_regions(pdev, BIT(0));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index caa4bfc4c1d6..9b6138b11776 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
+ u32 old_val, value;
+
+ old_val = readl(ioaddr + MAC_CTRL_REG);
+ value = old_val;
if (enable)
value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
else
value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
+ if (value != old_val)
+ writel(value, ioaddr + MAC_CTRL_REG);
}
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
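
The dwmac_lib.c hunk above and the stmmac_main.c hunks below apply the same micro-optimization: read the control register once, compute the new value, and skip the MMIO write when nothing actually changed. A generic sketch of the pattern (helper name hypothetical):

	static void mac_ctrl_update(void __iomem *ioaddr, u32 set, u32 clear)
	{
		u32 old_val, value;

		old_val = readl(ioaddr + MAC_CTRL_REG);
		value = (old_val & ~clear) | set;

		if (value != old_val)	/* avoid a redundant register write */
			writel(value, ioaddr + MAC_CTRL_REG);
	}
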
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 070b5ef165eb..592d29abcb1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -986,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- u32 ctrl;
+ u32 old_ctrl, ctrl;
- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
- ctrl &= ~priv->hw->link.speed_mask;
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
@@ -1064,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (tx_pause && rx_pause)
stmmac_mac_flow_ctrl(priv, duplex);
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index baa1f0a5cc37..b4a4fa0a58f8 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -7,12 +7,12 @@ config NET_VENDOR_WANGXUN
bool "Wangxun devices"
default y
help
- If you have a network (Ethernet) card belonging to this class, say Y.
+ If you have a network (Ethernet) card from Wangxun(R), say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Intel cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about Wangxun(R) cards. If you say Y, you will
+ be asked for your specific card in the following questions.
if NET_VENDOR_WANGXUN
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 76c4a709d73d..e97db826cdd4 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -348,7 +348,7 @@ do { \
* This macro is invoked by the OS-specific code before it leaves the
* function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
* if the number of used RxDs is equal or lower than the
- * the given low water mark.
+ * given low water mark.
*
* para low_water low water mark of used RxD's
*
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 018d365f9deb..7962c37b3f14 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -797,7 +797,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct geneve_sock *gs4,
struct flowi4 *fl4,
const struct ip_tunnel_info *info,
- __be16 dport, __be16 sport)
+ __be16 dport, __be16 sport,
+ __u8 *full_tos)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -823,6 +824,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
+ if (full_tos)
+ *full_tos = tos;
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
@@ -876,8 +879,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
use_cache = false;
}
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
@@ -911,6 +913,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
struct flowi4 fl4;
+ __u8 full_tos;
__u8 tos, ttl;
__be16 df = 0;
__be16 sport;
@@ -921,7 +924,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, &full_tos);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -965,7 +968,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
+ tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
@@ -1149,7 +1152,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
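
The full_tos plumbing exists because RT_TOS() masks the TOS byte with IPTOS_TOS_MASK (0x1E), discarding the upper DSCP/precedence bits and bit 0; that masked value is what the IPv4 route lookup wants, but ip_tunnel_ecn_encap() needs the unmasked byte to build the outer header. In short:

	/* from <uapi/linux/ip.h> and <uapi/linux/in_route.h> */
	#define IPTOS_TOS_MASK	0x1E
	#define RT_TOS(tos)	((tos) & IPTOS_TOS_MASK)

	fl4->flowi4_tos = RT_TOS(tos);	/* masked: route lookup only          */
	*full_tos = tos;		/* unmasked: kept for ECN encap below */
	...
	tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
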
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 1e9eae208e44..53a1dbeaffa6 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -568,7 +568,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
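
The corrected arithmetic aligns the physical address down to a page boundary and then grows the mapping size by the bytes the round-down added in front, rounding the total up to whole pages; the removed & PAGE_MASK on addr itself had thrown away the in-page offset and broken the size calculation. A worked example with 4 KiB pages (numbers hypothetical):

	addr = qcom_smem_virt_to_phys(virt);	/* e.g. 0x4ab01234           */
	phys = addr & PAGE_MASK;		/* 0x4ab01000 (rounded down) */
	size = PAGE_ALIGN(size + addr - phys);	/* e.g. PAGE_ALIGN(0x0e00 + 0x234)
						 * = PAGE_ALIGN(0x1034) = 0x2000 */
	iova = phys;				/* direct mapping            */
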
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..6f35438cda89 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -48,7 +48,7 @@ struct ipa;
*
* The offset of registers related to resource types is computed by a macro
* that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
+ * which is a member of the ipa_resource_type_src enumerated type for
* source endpoint resources or the ipa_resource_type_dst enumerated type
* for destination endpoint resources.
*
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index ef02f2cf5ce1..cbabca167a07 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@ out1:
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
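
The two ipvtap hunks are pure section annotations: __init lets the kernel discard the function's text once initialization has run, and __exit lets it be dropped entirely for built-in code. The canonical shape, for reference:

	#include <linux/module.h>

	static int __init example_init(void)
	{
		return 0;	/* lives in .init.text, freed after load */
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
	}			/* omitted from the image when built in */
	module_exit(example_exit);

	MODULE_LICENSE("GPL");
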
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index f1683ce6b561..c6d271e5687e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -162,6 +162,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
+static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+{
+ struct macsec_rx_sa *sa = NULL;
+ int an;
+
+ for (an = 0; an < MACSEC_NUM_AN; an++) {
+ sa = macsec_rxsa_get(rx_sc->sa[an]);
+ if (sa)
+ break;
+ }
+ return sa;
+}
+
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -449,11 +462,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -500,18 +508,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
+static unsigned int macsec_msdu_len(struct sk_buff *skb)
+{
+ struct macsec_dev *macsec = macsec_priv(skb->dev);
+ struct macsec_secy *secy = &macsec->secy;
+ bool sci_present = macsec_skb_cb(skb)->has_sci;
+
+ return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
+}
+
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
+ unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
- txsc_stats->stats.OutOctetsEncrypted += skb->len;
+ txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
- txsc_stats->stats.OutOctetsProtected += skb->len;
+ txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@@ -541,9 +559,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
- macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
- len = skb->len;
+ /* the packet is encrypted/protected, so tx_bytes must be calculated */
+ len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
+ macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -702,6 +721,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
+ macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -743,15 +763,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_dropped++;
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+ unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
- rxsc_stats->stats.InOctetsDecrypted += skb->len;
+ rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
- rxsc_stats->stats.InOctetsValidated += skb->len;
+ rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -764,6 +786,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
+ this_cpu_inc(rx_sa->stats->InPktsNotValid);
+ secy->netdev->stats.rx_errors++;
return false;
}
@@ -856,9 +880,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
- len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1049,6 +1073,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
continue;
}
@@ -1158,6 +1183,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
+ secy->netdev->stats.rx_errors++;
goto drop_nosa;
}
@@ -1168,11 +1194,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
+ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_errors++;
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1182,6 +1212,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1202,6 +1234,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
goto drop;
}
}
@@ -1230,6 +1263,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1237,7 +1271,6 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
- len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1279,6 +1312,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_errors++;
continue;
}
@@ -3404,6 +3438,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
@@ -3414,7 +3449,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
- len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
@@ -3622,7 +3656,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
eth_hw_addr_set(dev, addr->sa_data);
- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@@ -3662,6 +3695,7 @@ static void macsec_get_stats64(struct net_device *dev,
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
+ s->rx_errors = dev->stats.rx_errors;
}
static int macsec_get_iflink(const struct net_device *dev)
@@ -3960,6 +3994,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
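
A large share of the macsec hunks enforce one accounting rule: octet counters must use the MSDU (user data) length rather than the on-wire frame length, and the tx_bytes sample must be taken before encryption changes skb->len. Restating the arithmetic from macsec_msdu_len() and macsec_encrypt_done():

	/* user data = frame minus MACsec header (incl. optional SCI) and ICV */
	msdu_len = skb->len - macsec_hdr_len(sci_present) - secy->icv_len;

	/* device tx_bytes additionally counts both Ethernet addresses */
	len = msdu_len + 2 * ETH_ALEN;
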
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 1e38039c5c56..6939563d3b7c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -535,7 +535,7 @@ static int dp83867_of_init_io_impedance(struct phy_device *phydev)
cell = of_nvmem_cell_get(of_node, "io_impedance_ctrl");
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
- if (ret != -ENOENT)
+ if (ret != -ENOENT && ret != -EOPNOTSUPP)
return phydev_err_probe(phydev, ret,
"failed to get nvmem cell io_impedance_ctrl\n");
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 29b1df03f3e8..a87a4b3ffce4 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -190,44 +190,42 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
*/
static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
{
+ u16 adv_l_mask, adv_l = 0;
+ u16 adv_m_mask, adv_m = 0;
int changed = 0;
- u16 adv_l = 0;
- u16 adv_m = 0;
int ret;
+ adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
+ MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+ adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_FORCE:
adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS;
break;
case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
break;
case MASTER_SLAVE_CFG_UNKNOWN:
case MASTER_SLAVE_CFG_UNSUPPORTED:
- return 0;
+ /* if master/slave role is not specified, do not overwrite it */
+ adv_l_mask &= ~MDIO_AN_T1_ADV_L_FORCE_MS;
+ adv_m_mask &= ~MDIO_AN_T1_ADV_M_MST;
+ break;
default:
phydev_warn(phydev, "Unsupported Master/Slave mode\n");
return -EOPNOTSUPP;
}
- switch (phydev->master_slave_set) {
- case MASTER_SLAVE_CFG_MASTER_FORCE:
- case MASTER_SLAVE_CFG_MASTER_PREFERRED:
- adv_m |= MDIO_AN_T1_ADV_M_MST;
- break;
- case MASTER_SLAVE_CFG_SLAVE_FORCE:
- case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
- break;
- default:
- break;
- }
-
adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L,
- (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP
- | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l);
+ adv_l_mask, adv_l);
if (ret < 0)
return ret;
if (ret > 0)
@@ -236,7 +234,7 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M,
- MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m);
+ adv_m_mask, adv_m);
if (ret < 0)
return ret;
if (ret > 0)
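
The key change in genphy_c45_baset1_an_config_aneg() is that the register masks are now variables: when the master/slave role is unknown, the FORCE_MS and MST bits are removed from the masks, so phy_modify_mmd_changed() leaves whatever the PHY already advertises untouched instead of force-clearing it. The helper's contract, as used here:

	/* read-modify-write of an MMD register: only bits set in adv_l_mask
	 * are replaced by the matching bits of adv_l; returns < 0 on error,
	 * 0 if the register was already correct, > 0 if it was changed.
	 */
	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L,
				     adv_l_mask, adv_l);
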
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index a74b320f5b27..12ff276b80ae 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,6 +316,12 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
+ /* If we managed to get here with the PHY state machine in a state neither
+ * PHY_HALTED nor PHY_READY, this is an indication that something went wrong
+ * and we should most likely be using MAC managed PM, but we are not.
+ */
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+
ret = phy_init_hw(phydev);
if (ret < 0)
return ret;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index dafd3e9ebbf8..c8791e9b451d 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1111,7 +1111,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
+ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index c3d42062559d..9e75ed3f08ce 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -716,10 +716,20 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_reset_mac_header(skb);
skb->protocol = eth_hdr(skb)->h_proto;
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (!tap) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return total_len;
+ }
+ skb->dev = tap->dev;
+
if (vnet_hdr_len) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
tap_is_little_endian(q));
if (err) {
+ rcu_read_unlock();
drop_reason = SKB_DROP_REASON_DEV_HDR;
goto err_kfree;
}
@@ -732,8 +742,6 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
- rcu_read_lock();
- tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
skb_zcopy_init(skb, msg_control);
@@ -742,14 +750,8 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
uarg->callback(NULL, uarg, false);
}
- if (tap) {
- skb->dev = tap->dev;
- dev_queue_xmit(skb);
- } else {
- kfree_skb(skb);
- }
+ dev_queue_xmit(skb);
rcu_read_unlock();
-
return total_len;
err_kfree:
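
The tap_get_user() reordering closes a use-after-free window: q->tap must be resolved under rcu_read_lock() before anything touches skb->dev, and the read-side lock has to stay held across dev_queue_xmit(). The resulting control flow, condensed:

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (!tap) {			/* device already gone: drop silently */
		kfree_skb(skb);
		rcu_read_unlock();
		return total_len;
	}
	skb->dev = tap->dev;
	/* ... vnet header conversion, zerocopy setup ... */
	dev_queue_xmit(skb);
	rcu_read_unlock();
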
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 0ad468a00064..aff39bf3161d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1680,7 +1680,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1693,7 +1693,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1706,7 +1706,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1719,7 +1719,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1732,7 +1732,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1745,7 +1745,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1758,7 +1758,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1771,7 +1771,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1784,7 +1784,7 @@ static const struct driver_info toshiba_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1797,7 +1797,7 @@ static const struct driver_info mct_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1810,7 +1810,7 @@ static const struct driver_info at_umc2000_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1823,7 +1823,7 @@ static const struct driver_info at_umc200_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1836,7 +1836,7 @@ static const struct driver_info at_umc2000sp_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 571a399c195d..709e3c59e340 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1390,6 +1390,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x1e2d, 0x00b9, 0)}, /* Cinterion MV31 RmNet based on new baseline */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f3, 0)}, /* Cinterion MV32-W-A RmNet */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f4, 0)}, /* Cinterion MV32-W-B RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0f6efaabaa32..d142ac8fcf6e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5906,6 +5906,11 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -6431,21 +6436,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
- switch (tp->version) {
- case RTL_VER_10:
- case RTL_VER_11:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
- break;
- case RTL_VER_12:
- case RTL_VER_13:
- case RTL_VER_15:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
- break;
- default:
- break;
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
}
static void rtl8156_change_mtu(struct r8152 *tp)
@@ -6557,6 +6549,11 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2cb833b3006a..466da01ba2e3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,7 +312,6 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
- struct netdev_queue *queue = NULL;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -330,7 +329,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
- queue = netdev_get_tx_queue(dev, rxq);
/* The napi pointer is available when an XDP program is
* attached or when GRO is enabled
@@ -342,8 +340,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
- if (queue)
- txq_trans_cond_update(queue);
if (!use_napi)
dev_lstats_add(dev, length);
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ec8e1b3108c3..9cce7dec7366 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@ struct send_queue {
struct virtnet_sq_stats stats;
struct napi_struct napi;
+
+ /* Record whether sq is in reset state. */
+ bool reset;
};
/* Internal representation of a receive virtqueue */
@@ -267,6 +270,12 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Interrupt coalescing settings */
+ u32 tx_usecs;
+ u32 rx_usecs;
+ u32 tx_max_packets;
+ u32 rx_max_packets;
+
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -284,6 +293,9 @@ struct padded_vnet_hdr {
char padding[12];
};
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -1057,8 +1069,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
goto err_xdp;
+ }
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
@@ -1196,7 +1211,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
if (!hdr_hash || !skb)
return;
- switch ((int)hdr_hash->hash_report) {
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
case VIRTIO_NET_HASH_REPORT_TCPv4:
case VIRTIO_NET_HASH_REPORT_UDPv4:
case VIRTIO_NET_HASH_REPORT_TCPv6:
@@ -1214,7 +1229,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
@@ -1625,6 +1640,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
+ if (sq->reset) {
+ __netif_tx_unlock(txq);
+ return;
+ }
+
do {
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
@@ -1872,6 +1892,70 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (running)
+ napi_disable(&rq->napi);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ if (running)
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ return err;
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi,
+ struct send_queue *sq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ if (running)
+ virtnet_napi_tx_disable(&sq->napi);
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
+
+ /* 1. wait for all xmit to complete
+ * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
+ */
+ __netif_tx_lock_bh(txq);
+
+ /* Prevent rx poll from accessing sq. */
+ sq->reset = true;
+
+ /* Prevent the upper layer from trying to send packets. */
+ netif_stop_subqueue(vi->dev, qindex);
+
+ __netif_tx_unlock_bh(txq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ __netif_tx_lock_bh(txq);
+ sq->reset = false;
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock_bh(txq);
+
+ if (running)
+ virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ return err;
+}
+
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -2282,10 +2366,57 @@ static void virtnet_get_ringparam(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- ring->rx_pending = ring->rx_max_pending;
- ring->tx_pending = ring->tx_max_pending;
+ ring->rx_max_pending = vi->rq[0].vq->num_max;
+ ring->tx_max_pending = vi->sq[0].vq->num_max;
+ ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+}
+
+static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 rx_pending, tx_pending;
+ struct receive_queue *rq;
+ struct send_queue *sq;
+ int i, err;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+
+ if (ring->rx_pending == rx_pending &&
+ ring->tx_pending == tx_pending)
+ return 0;
+
+ if (ring->rx_pending > vi->rq[0].vq->num_max)
+ return -EINVAL;
+
+ if (ring->tx_pending > vi->sq[0].vq->num_max)
+ return -EINVAL;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rq = vi->rq + i;
+ sq = vi->sq + i;
+
+ if (ring->tx_pending != tx_pending) {
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
@@ -2615,27 +2746,89 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ struct scatterlist sgs_tx, sgs_rx;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
+
+ coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+ &sgs_tx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+ coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+ &sgs_rx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->rx_usecs = ec->rx_coalesce_usecs;
+ vi->rx_max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
+{
+ /* usecs coalescing is supported only if the VIRTIO_NET_F_NOTF_COAL
+ * feature is negotiated.
+ */
+ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i, napi_weight;
-
- if (ec->tx_max_coalesced_frames > 1 ||
- ec->rx_max_coalesced_frames != 1)
- return -EINVAL;
+ int ret, i, napi_weight;
+ bool update_napi = false;
+ /* Can't change NAPI weight while the device is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
+ else
+ update_napi = true;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
+ ret = virtnet_send_notf_coal_cmds(vi, ec);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi) {
for (i = 0; i < vi->max_queue_pairs; i++)
vi->sq[i].napi.weight = napi_weight;
}
- return 0;
+ return ret;
}
static int virtnet_get_coalesce(struct net_device *dev,
@@ -2643,16 +2836,19 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_GCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
- memcpy(ec, &ec_default, sizeof(ec_default));
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rx_usecs;
+ ec->tx_coalesce_usecs = vi->tx_usecs;
+ ec->tx_max_coalesced_frames = vi->tx_max_packets;
+ ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
- ec->tx_max_coalesced_frames = 1;
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
return 0;
}
@@ -2771,10 +2967,12 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_ringparam = virtnet_set_ringparam,
.get_strings = virtnet_get_strings,
.get_sset_count = virtnet_get_sset_count,
.get_ethtool_stats = virtnet_get_ethtool_stats,
@@ -3168,6 +3366,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -3175,26 +3394,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
}
}
@@ -3441,6 +3648,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -3577,6 +3786,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->rx_usecs = 0;
+ vi->tx_usecs = 0;
+ vi->tx_max_packets = 0;
+ vi->rx_max_packets = 0;
+ }
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
@@ -3811,7 +4027,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
- VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
+ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL
static unsigned int features[] = {
VIRTNET_FEATURES,
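
Of the virtio-net additions, virtnet_tx_resize() is the delicate one: a virtqueue cannot be resized while NAPI or the stack may touch it, so the function parks tx NAPI, flags the queue via sq->reset (checked by virtnet_poll_cleantx() on the rx side), and stops the subqueue under the tx lock before calling virtqueue_resize(). Reduced to its locking skeleton:

	virtnet_napi_tx_disable(&sq->napi);	/* no more tx completions    */

	__netif_tx_lock_bh(txq);
	sq->reset = true;			/* rx-side cleantx backs off */
	netif_stop_subqueue(vi->dev, qindex);	/* stack stops queueing      */
	__netif_tx_unlock_bh(txq);

	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);

	__netif_tx_lock_bh(txq);
	sq->reset = false;
	netif_tx_wake_queue(txq);
	__netif_tx_unlock_bh(txq);

	virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);

From userspace the whole path is exercised with a plain ethtool -G <dev> rx <n> tx <n>.
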
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 90811ab851fd..c3285242f74f 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2321,7 +2321,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = *saddr;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
+ fl6.flowlabel = ip6_make_flowinfo(tos, label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = dport;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 4714c86bb501..64e7a767d963 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -52,15 +52,12 @@ DECLARE_EVENT_CLASS(ath10k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
@@ -92,16 +89,13 @@ TRACE_EVENT(ath10k_log_dbg,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index a02e54735e88..76560587bea0 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -126,15 +126,12 @@ DECLARE_EVENT_CLASS(ath11k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
- __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH11K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH11K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index a3d3740419eb..231a94769ddb 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -253,13 +253,10 @@ DECLARE_EVENT_CLASS(ath6kl_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -284,14 +281,11 @@ TRACE_EVENT(ath6kl_log_dbg,
TP_ARGS(level, vaf),
TP_STRUCT__entry(
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h
index ba711644d27e..9935cf475b6d 100644
--- a/drivers/net/wireless/ath/trace.h
+++ b/drivers/net/wireless/ath/trace.h
@@ -40,16 +40,13 @@ TRACE_EVENT(ath_log,
TP_STRUCT__entry(
__string(device, wiphy_name(wiphy))
__string(driver, KBUILD_MODNAME)
- __dynamic_array(char, msg, ATH_DBG_MAX_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, wiphy_name(wiphy));
__assign_str(driver, KBUILD_MODNAME);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH_DBG_MAX_LEN,
- vaf->fmt,
- *vaf->va) >= ATH_DBG_MAX_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 11c989e95880..201f44612c31 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -70,13 +70,10 @@ DECLARE_EVENT_CLASS(wil6210_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, WIL6210_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- WIL6210_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= WIL6210_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
index 338c66d0c5f8..5a139d7ed47a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
@@ -33,13 +33,11 @@ TRACE_EVENT(brcmf_err,
TP_ARGS(func, vaf),
TP_STRUCT__entry(
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
@@ -50,14 +48,12 @@ TRACE_EVENT(brcmf_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
index 0e8a69ab909f..488456420353 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
@@ -28,12 +28,10 @@ DECLARE_EVENT_CLASS(brcms_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -64,14 +62,12 @@ TRACE_EVENT(brcms_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 7dd70011fd1e..1d6c292cf545 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -18,12 +18,10 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -55,14 +53,12 @@ TRACE_EVENT(iwlwifi_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index b89519ab9205..eb1d1ba3a443 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -635,7 +635,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
conn_info->req_ies_len = 0;
}
-inline void wilc_handle_disconnect(struct wilc_vif *vif)
+void wilc_handle_disconnect(struct wilc_vif *vif)
{
struct host_if_drv *hif_drv = vif->hif_drv;
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 69ba1d469e9f..baa2881f4465 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -215,5 +215,6 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto);
int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
-inline void wilc_handle_disconnect(struct wilc_vif *vif);
+void wilc_handle_disconnect(struct wilc_vif *vif);
+
#endif
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 2caf997f9bc9..07596bf5f7d6 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
pn53x_unregister_nfc(pn532->priv);
serdev_device_close(serdev);
pn53x_common_clean(pn532->priv);
+ del_timer_sync(&pn532->cmd_timeout);
kfree_skb(pn532->recv_skb);
kfree(pn532);
}
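
The single added line is an ordering fix in the remove path: pn532->cmd_timeout can still fire and touch the device state, so it must be stopped synchronously before the receive skb and the device structure are freed. The remove path as the hunk leaves it:

	pn53x_unregister_nfc(pn532->priv);
	serdev_device_close(serdev);
	pn53x_common_clean(pn532->priv);
	del_timer_sync(&pn532->cmd_timeout);	/* timer dead before freeing */
	kfree_skb(pn532->recv_skb);
	kfree(pn532);
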
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index b019755e4e21..3ece49cb18ff 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -45,7 +45,6 @@
#define NTB_EPF_MIN_DB_COUNT 3
#define NTB_EPF_MAX_DB_COUNT 31
-#define NTB_EPF_MW_OFFSET 2
#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
@@ -67,6 +66,7 @@ struct ntb_epf_dev {
enum pci_barno ctrl_reg_bar;
enum pci_barno peer_spad_reg_bar;
enum pci_barno db_reg_bar;
+ enum pci_barno mw_bar;
unsigned int mw_count;
unsigned int spad_count;
@@ -92,6 +92,8 @@ struct ntb_epf_data {
enum pci_barno peer_spad_reg_bar;
/* BAR that contains Doorbell region and Memory window '1' */
enum pci_barno db_reg_bar;
+ /* BAR that contains memory windows */
+ enum pci_barno mw_bar;
};
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
@@ -411,7 +413,7 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EINVAL;
}
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
mw_size = pci_resource_len(ntb->pdev, bar);
@@ -453,7 +455,7 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
if (idx == 0)
offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
@@ -565,6 +567,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
struct pci_dev *pdev)
{
struct device *dev = ndev->dev;
+ size_t spad_sz, spad_off;
int ret;
pci_set_drvdata(pdev, ndev);
@@ -599,10 +602,16 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
goto err_dma_mask;
}
- ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
- if (!ndev->peer_spad_reg) {
- ret = -EIO;
- goto err_dma_mask;
+ if (ndev->peer_spad_reg_bar) {
+ ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (!ndev->peer_spad_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+ } else {
+ spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+ spad_off = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz;
}
ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
@@ -657,6 +666,7 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
enum pci_barno peer_spad_reg_bar = BAR_1;
enum pci_barno ctrl_reg_bar = BAR_0;
enum pci_barno db_reg_bar = BAR_2;
+ enum pci_barno mw_bar = BAR_2;
struct device *dev = &pdev->dev;
struct ntb_epf_data *data;
struct ntb_epf_dev *ndev;
@@ -671,17 +681,16 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
data = (struct ntb_epf_data *)id->driver_data;
if (data) {
- if (data->peer_spad_reg_bar)
- peer_spad_reg_bar = data->peer_spad_reg_bar;
- if (data->ctrl_reg_bar)
- ctrl_reg_bar = data->ctrl_reg_bar;
- if (data->db_reg_bar)
- db_reg_bar = data->db_reg_bar;
+ peer_spad_reg_bar = data->peer_spad_reg_bar;
+ ctrl_reg_bar = data->ctrl_reg_bar;
+ db_reg_bar = data->db_reg_bar;
+ mw_bar = data->mw_bar;
}
ndev->peer_spad_reg_bar = peer_spad_reg_bar;
ndev->ctrl_reg_bar = ctrl_reg_bar;
ndev->db_reg_bar = db_reg_bar;
+ ndev->mw_bar = mw_bar;
ndev->dev = dev;
ntb_epf_init_struct(ndev, pdev);
@@ -729,6 +738,14 @@ static const struct ntb_epf_data j721e_data = {
.ctrl_reg_bar = BAR_0,
.peer_spad_reg_bar = BAR_1,
.db_reg_bar = BAR_2,
+ .mw_bar = BAR_2,
+};
+
+static const struct ntb_epf_data mx8_data = {
+ .ctrl_reg_bar = BAR_0,
+ .peer_spad_reg_bar = BAR_0,
+ .db_reg_bar = BAR_2,
+ .mw_bar = BAR_4,
};
static const struct pci_device_id ntb_epf_pci_tbl[] = {
@@ -737,6 +754,11 @@ static const struct pci_device_id ntb_epf_pci_tbl[] = {
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)&mx8_data,
+ },
{ },
};
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 733557231ed0..0ed6f809ff2e 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2406,7 +2406,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
idt_get_mw_name(data), ndev->mws[idx].bar);
@@ -2435,7 +2435,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off,
"%s BAR%hhu, ", idt_get_mw_name(data),
@@ -2480,7 +2480,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
int src;
data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
off += scnprintf(strbuf + off, size - off,
- "\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
+ "\t%hhu. 0x%08x from peer %d (Port %hhu)\n",
idx, data, src, ndev->peers[src].port);
}
off += scnprintf(strbuf + off, size - off, "\n");
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index e5f14e20a9ff..84772013812b 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -763,7 +763,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
- else if (pdev_is_gen4(ndev->ntb.pdev))
+ else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev))
return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
@@ -1874,7 +1874,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
- } else if (pdev_is_gen4(pdev)) {
+ } else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) {
ndev->ntb.ops = &intel_ntb4_ops;
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
@@ -1904,7 +1904,8 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -1920,7 +1921,8 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2047,6 +2049,8 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
/* GEN4 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
+ /* GEN5 PCIe */
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index 4081fc538ff4..22cac7975b3c 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -197,7 +197,7 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
if (pdev_is_ICX(pdev))
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
- else if (pdev_is_SPR(pdev))
+ else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index b233d1c6ba2d..da4d5fe55bab 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -70,6 +70,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_GNR 0x0db4
/* NTB control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
@@ -228,4 +229,10 @@ static inline int pdev_is_gen4(struct pci_dev *pdev)
return 0;
}
+
+static inline int pdev_is_gen5(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_GNR;
+}
+
#endif
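Taken together, the gen5 hunks route the new GNR device through the existing gen4 machinery rather than adding new ops; a hedged summary of the dispatch after this series:

    /*
     * probe:    pdev_is_gen4() || pdev_is_gen5()  ->  intel_ntb4_ops
     * topology: pdev_is_SPR()  || pdev_is_gen5()  ->  spr_ppd_topo()
     * debugfs:  gen4 or gen5                      ->  ndev_ntb4_debugfs_read()
     */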
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index b7bf3f863d79..5ee0afa621a9 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -367,14 +367,16 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
u64 bits;
int n;
+ if (*offp)
+ return 0;
+
buf = kmalloc(size + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, size, offp, ubuf, size);
- if (ret < 0) {
+ if (copy_from_user(buf, ubuf, size)) {
kfree(buf);
- return ret;
+ return -EFAULT;
}
buf[size] = 0;
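The practical effect on the debugfs interface: each write(2) must carry a complete command, since a non-zero file offset now returns 0 instead of resuming a partial copy. A hedged userspace sketch (the debugfs path is a placeholder, not a stable ABI):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* placeholder path; the real layout depends on the port topology */
            int fd = open("/sys/kernel/debug/ntb_tool/0000:01:00.0/db", O_WRONLY);

            if (fd < 0)
                    return 1;
            write(fd, "c 0x1\n", 6);        /* one complete command per write() */
            close(fd);
            return 0;
    }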
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f36efcc11f67..7e88cd242380 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -453,6 +453,21 @@ static void pmem_release_disk(void *__pmem)
put_disk(pmem->disk);
}
+static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
+ unsigned long pfn, unsigned long nr_pages, int mf_flags)
+{
+ struct pmem_device *pmem =
+ container_of(pgmap, struct pmem_device, pgmap);
+ u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
+ u64 len = nr_pages << PAGE_SHIFT;
+
+ return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
+}
+
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+ .memory_failure = pmem_pagemap_memory_failure,
+};
+
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
@@ -514,6 +529,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags = PFN_DEV;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -527,6 +543,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
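The offset math in the new callback deserves a note: the poisoned pfn is converted to a physical address, then rebased past the namespace start and the pfn metadata reserve, so the dax holder sees a data-area-relative byte offset. A hedged restatement with the same field names, not part of the patch:

    static u64 pmem_poison_offset(struct pmem_device *pmem, unsigned long pfn)
    {
            /* physical address of the bad page, minus the namespace base
             * (phys_addr) and the metadata reserve (data_offset)
             */
            return PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
    }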
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index d976260eca7a..473a71bbd9c9 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -133,7 +133,8 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- memregion_free(nd_region->id);
+ if (!test_bit(ND_REGION_CXL, &nd_region->flags))
+ memregion_free(nd_region->id);
kfree(nd_region);
}
@@ -982,9 +983,14 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!nd_region)
return NULL;
- nd_region->id = memregion_alloc(GFP_KERNEL);
- if (nd_region->id < 0)
- goto err_id;
+ /* CXL pre-assigns memregion ids before creating nvdimm regions */
+ if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
+ nd_region->id = ndr_desc->memregion;
+ } else {
+ nd_region->id = memregion_alloc(GFP_KERNEL);
+ if (nd_region->id < 0)
+ goto err_id;
+ }
nd_region->lane = alloc_percpu(struct nd_percpu_lane);
if (!nd_region->lane)
@@ -1043,9 +1049,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
- err_percpu:
- memregion_free(nd_region->id);
- err_id:
+err_percpu:
+ if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
+ memregion_free(nd_region->id);
+err_id:
kfree(nd_region);
return NULL;
}
@@ -1068,6 +1075,13 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+void nvdimm_region_delete(struct nd_region *nd_region)
+{
+ if (nd_region)
+ nd_device_unregister(&nd_region->dev, ND_SYNC);
+}
+EXPORT_SYMBOL_GPL(nvdimm_region_delete);
+
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
int rc = 0;
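The ownership rule these hunks establish: with ND_REGION_CXL set, the memregion id is allocated by the CXL caller and stays caller-owned, so the nvdimm core must skip memregion_free() on both the error path and final release. A hedged sketch of the caller side, assuming the usual memregion API:

    /* Hedged sketch of a CXL-side caller; not part of this patch. */
    struct nd_region_desc ndr_desc = { };
    int id = memregion_alloc(GFP_KERNEL);

    if (id < 0)
            return id;
    ndr_desc.memregion = id;                 /* caller keeps ownership */
    set_bit(ND_REGION_CXL, &ndr_desc.flags); /* core skips memregion_free() */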
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 995b6cdc67ed..20da455d2ef6 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -81,17 +81,24 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
ndr_desc.res = &res;
ndr_desc.numa_node = nid;
ndr_desc.flush = async_pmem_flush;
+ ndr_desc.provider_data = vdev;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ /*
+ * The NVDIMM region may be used before virtio_dev_probe()
+ * gets around to calling virtio_device_ready(), so mark the
+ * device ready here.
+ */
+ virtio_device_ready(vdev);
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region) {
dev_err(&vdev->dev, "failed to create nvdimm region\n");
err = -ENXIO;
goto out_nd;
}
- nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
return 0;
out_nd:
+ virtio_reset_device(vdev);
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
vdev->config->del_vqs(vdev);
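The ordering constraint being fixed: creating the region can immediately issue async_pmem_flush() on the request virtqueue, so the device must be DRIVER_OK first. A hedged sketch of the resulting probe sequence, error handling elided:

    init_vq(vpmem);                 /* req_vq becomes usable */
    virtio_device_ready(vdev);      /* DRIVER_OK before any request */
    nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
                                    /* may trigger async_pmem_flush() */
    /* on failure: virtio_reset_device(vdev) before unregistering the bus */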
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index 87ae409a32b9..656e46d938da 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVME Support"
+source "drivers/nvme/common/Kconfig"
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index fb42c44609a8..eedca8c72098 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NVME_COMMON) += common/
obj-y += host/
obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
new file mode 100644
index 000000000000..4514f44362dd
--- /dev/null
+++ b/drivers/nvme/common/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NVME_COMMON
+ tristate
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
new file mode 100644
index 000000000000..720c625b8a52
--- /dev/null
+++ b/drivers/nvme/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+
+nvme-common-y += auth.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
new file mode 100644
index 000000000000..04bd28f17dcc
--- /dev/null
+++ b/drivers/nvme/common/auth.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include <linux/nvme.h>
+#include <linux/nvme-auth.h>
+
+static u32 nvme_dhchap_seqnum;
+static DEFINE_MUTEX(nvme_dhchap_mutex);
+
+u32 nvme_auth_get_seqnum(void)
+{
+ u32 seqnum;
+
+ mutex_lock(&nvme_dhchap_mutex);
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum = prandom_u32();
+ else {
+ nvme_dhchap_seqnum++;
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum++;
+ }
+ seqnum = nvme_dhchap_seqnum;
+ mutex_unlock(&nvme_dhchap_mutex);
+ return seqnum;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
+
+static struct nvme_auth_dhgroup_map {
+ const char name[16];
+ const char kpp[16];
+} dhgroup_map[] = {
+ [NVME_AUTH_DHGROUP_NULL] = {
+ .name = "null", .kpp = "null" },
+ [NVME_AUTH_DHGROUP_2048] = {
+ .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
+ [NVME_AUTH_DHGROUP_3072] = {
+ .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
+ [NVME_AUTH_DHGROUP_4096] = {
+ .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
+ [NVME_AUTH_DHGROUP_6144] = {
+ .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
+ [NVME_AUTH_DHGROUP_8192] = {
+ .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
+};
+
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].name;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
+
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].kpp;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
+
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
+{
+ int i;
+
+ if (!dhgroup_name || !strlen(dhgroup_name))
+ return NVME_AUTH_DHGROUP_INVALID;
+ for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+ if (!strlen(dhgroup_map[i].name))
+ continue;
+ if (!strncmp(dhgroup_map[i].name, dhgroup_name,
+ strlen(dhgroup_map[i].name)))
+ return i;
+ }
+ return NVME_AUTH_DHGROUP_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
+
+static struct nvme_dhchap_hash_map {
+ int len;
+ const char hmac[15];
+ const char digest[8];
+} hash_map[] = {
+ [NVME_AUTH_HASH_SHA256] = {
+ .len = 32,
+ .hmac = "hmac(sha256)",
+ .digest = "sha256",
+ },
+ [NVME_AUTH_HASH_SHA384] = {
+ .len = 48,
+ .hmac = "hmac(sha384)",
+ .digest = "sha384",
+ },
+ [NVME_AUTH_HASH_SHA512] = {
+ .len = 64,
+ .hmac = "hmac(sha512)",
+ .digest = "sha512",
+ },
+};
+
+const char *nvme_auth_hmac_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].hmac;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
+
+const char *nvme_auth_digest_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].digest;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
+
+u8 nvme_auth_hmac_id(const char *hmac_name)
+{
+ int i;
+
+ if (!hmac_name || !strlen(hmac_name))
+ return NVME_AUTH_HASH_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+ if (!strlen(hash_map[i].hmac))
+ continue;
+ if (!strncmp(hash_map[i].hmac, hmac_name,
+ strlen(hash_map[i].hmac)))
+ return i;
+ }
+ return NVME_AUTH_HASH_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
+
+size_t nvme_auth_hmac_hash_len(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return 0;
+ return hash_map[hmac_id].len;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash)
+{
+ struct nvme_dhchap_key *key;
+ unsigned char *p;
+ u32 crc;
+ int ret, key_len;
+ size_t allocated_len = strlen(secret);
+
+ /* The secret may be suffixed with a ':' */
+ p = strrchr(secret, ':');
+ if (p)
+ allocated_len = p - secret;
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return ERR_PTR(-ENOMEM);
+ key->key = kzalloc(allocated_len, GFP_KERNEL);
+ if (!key->key) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_len = base64_decode(secret, allocated_len, key->key);
+ if (key_len < 0) {
+ pr_debug("base64 key decoding error %d\n",
+ key_len);
+ ret = key_len;
+ goto out_free_secret;
+ }
+
+ if (key_len != 36 && key_len != 52 &&
+ key_len != 68) {
+ pr_err("Invalid key len %d\n", key_len);
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ if (key_hash > 0 &&
+ (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
+ pr_err("Mismatched key len %d for %s\n", key_len,
+ nvme_auth_hmac_name(key_hash));
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ /* The last four bytes are the CRC in little-endian format */
+ key_len -= 4;
+ /*
+ * The kernel's crc32() omits the pre- and post-inversion this
+ * format requires, so apply both complements manually.
+ */
+ crc = ~crc32(~0, key->key, key_len);
+
+ if (get_unaligned_le32(key->key + key_len) != crc) {
+ pr_err("key crc mismatch (key %08x, crc %08x)\n",
+ get_unaligned_le32(key->key + key_len), crc);
+ ret = -EKEYREJECTED;
+ goto out_free_secret;
+ }
+ key->len = key_len;
+ key->hash = key_hash;
+ return key;
+out_free_secret:
+ kfree_sensitive(key->key);
+out_free_key:
+ kfree(key);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
+
+void nvme_auth_free_key(struct nvme_dhchap_key *key)
+{
+ if (!key)
+ return;
+ kfree_sensitive(key->key);
+ kfree(key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free_key);
+
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
+{
+ const char *hmac_name;
+ struct crypto_shash *key_tfm;
+ struct shash_desc *shash;
+ u8 *transformed_key;
+ int ret;
+
+ if (!key || !key->key) {
+ pr_warn("No key specified\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ if (key->hash == 0) {
+ transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
+ return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+ }
+ hmac_name = nvme_auth_hmac_name(key->hash);
+ if (!hmac_name) {
+ pr_warn("Invalid key hash id %d\n", key->hash);
+ return ERR_PTR(-EINVAL);
+ }
+
+ key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(key_tfm))
+ return (u8 *)key_tfm;
+
+ shash = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(key_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+ if (!transformed_key) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ shash->tfm = key_tfm;
+ ret = crypto_shash_setkey(key_tfm, key->key, key->len);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_init(shash);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, nqn, strlen(nqn));
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_final(shash, transformed_key);
+ if (ret < 0)
+ goto out_free_transformed_key;
+
+ kfree(shash);
+ crypto_free_shash(key_tfm);
+
+ return transformed_key;
+
+out_free_transformed_key:
+ kfree_sensitive(transformed_key);
+out_free_shash:
+ kfree(shash);
+out_free_key:
+ crypto_free_shash(key_tfm);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
+
+static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
+{
+ const char *digest_name;
+ struct crypto_shash *tfm;
+ int ret;
+
+ digest_name = nvme_auth_digest_name(hmac_id);
+ if (!digest_name) {
+ pr_debug("%s: failed to get digest for %d\n", __func__,
+ hmac_id);
+ return -EINVAL;
+ }
+ tfm = crypto_alloc_shash(digest_name, 0, 0);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
+ if (ret < 0)
+ pr_debug("%s: Failed to hash digest len %zu\n", __func__,
+ skey_len);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ u8 *hashed_key;
+ const char *hmac_name;
+ int ret;
+
+ hashed_key = kmalloc(hlen, GFP_KERNEL);
+ if (!hashed_key)
+ return -ENOMEM;
+
+ ret = nvme_auth_hash_skey(hmac_id, skey,
+ skey_len, hashed_key);
+ if (ret < 0)
+ goto out_free_key;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ ret = -EINVAL;
+ goto out_free_key;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_key;
+ }
+
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out_free_hash;
+ }
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, hashed_key, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_update(desc, challenge, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_final(desc, aug);
+out_free_desc:
+ kfree_sensitive(desc);
+out_free_hash:
+ crypto_free_shash(tfm);
+out_free_key:
+ kfree_sensitive(hashed_key);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
+
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
+{
+ int ret;
+
+ ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
+ if (ret)
+ pr_debug("failed to set private key, error %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);
+
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ kpp_request_set_input(req, NULL, 0);
+ sg_init_one(&dst, host_key, host_key_len);
+ kpp_request_set_output(req, &dst, host_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
+
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist src, dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ sg_init_one(&src, ctrl_key, ctrl_key_len);
+ kpp_request_set_input(req, &src, ctrl_key_len);
+ sg_init_one(&dst, sess_key, sess_key_len);
+ kpp_request_set_output(req, &dst, sess_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
+
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
+
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
+{
+ struct nvme_dhchap_key *key;
+ u8 key_hash;
+
+ if (!secret) {
+ *ret_key = NULL;
+ return 0;
+ }
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+ return -EINVAL;
+
+ /* Pass in the secret without the 'DHHC-1:XX:' prefix */
+ key = nvme_auth_extract_key(secret + 10, key_hash);
+ if (IS_ERR(key)) {
+ *ret_key = NULL;
+ return PTR_ERR(key);
+ }
+
+ *ret_key = key;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+
+MODULE_LICENSE("GPL v2");
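The secret format parsed by nvme_auth_generate_key() above is "DHHC-1:<hash id>:<base64 of key || CRC32>:", where hash id 00 means no transformation and 01/02/03 select SHA-256/384/512 per hash_map. A hedged usage sketch (the base64 payload is a placeholder, not a decodable key):

    u8 secret[] = "DHHC-1:01:<base64-of-key-and-crc32>:";
    struct nvme_dhchap_key *key;
    int ret;

    ret = nvme_auth_generate_key(secret, &key); /* parses hash id, checks CRC */
    if (ret)
            return ret;
    /* ... use key->key / key->len ... */
    nvme_auth_free_key(key);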
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 877d2ec4ea9f..2f6a7f8c94e8 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -92,6 +92,21 @@ config NVME_TCP
If unsure, say N.
+config NVME_AUTH
+ bool "NVM Express over Fabrics In-Band Authentication"
+ depends on NVME_CORE
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This provides support for NVMe over Fabrics In-Band Authentication.
+
+ If unsure, say N.
+
config NVME_APPLE
tristate "Apple ANS2 NVM Express host driver"
depends on OF && BLOCK
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a36ae1612059..e27202d22c7d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -10,12 +10,14 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
-nvme-core-y := core.o ioctl.o constants.o
+nvme-core-y += core.o ioctl.o
+nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH) += auth.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 5c352d5d8ee6..5fc5ea196b40 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -845,11 +845,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
apple_nvme_handle_cq(&anv->adminq, true);
spin_unlock_irqrestore(&anv->lock, flags);
- blk_mq_tagset_busy_iter(&anv->tagset, nvme_cancel_request, &anv->ctrl);
- blk_mq_tagset_busy_iter(&anv->admin_tagset, nvme_cancel_request,
- &anv->ctrl);
- blk_mq_tagset_wait_completed_request(&anv->tagset);
- blk_mq_tagset_wait_completed_request(&anv->admin_tagset);
+ nvme_cancel_tagset(&anv->ctrl);
+ nvme_cancel_admin_tagset(&anv->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -1222,6 +1219,11 @@ static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
nvme_put_ctrl(&anv->ctrl);
}
+static void devm_apple_nvme_put_tag_set(void *data)
+{
+ blk_mq_free_tag_set(data);
+}
+
static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
int ret;
@@ -1238,8 +1240,7 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(anv->dev,
- (void (*)(void *))blk_mq_free_tag_set,
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
&anv->admin_tagset);
if (ret)
return ret;
@@ -1263,8 +1264,8 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))blk_mq_free_tag_set, &anv->tagset);
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->tagset);
if (ret)
return ret;
@@ -1365,6 +1366,11 @@ static int apple_nvme_attach_genpd(struct apple_nvme *anv)
return 0;
}
+static void devm_apple_nvme_mempool_destroy(void *data)
+{
+ mempool_destroy(data);
+}
+
static int apple_nvme_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1462,8 +1468,8 @@ static int apple_nvme_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto put_dev;
}
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))mempool_destroy, anv->iod_mempool);
+ ret = devm_add_action_or_reset(anv->dev,
+ devm_apple_nvme_mempool_destroy, anv->iod_mempool);
if (ret)
goto put_dev;
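The pattern in these apple.c hunks replaces a function-pointer cast, which is undefined behavior when the callee's signature differs from void (*)(void *), with a typed wrapper. Generically, with a hypothetical destructor:

    /* Hedged generic form of the wrapper pattern used above. */
    static void devm_foo_release(void *data)
    {
            foo_destroy(data);      /* hypothetical destructor, typed arg */
    }

    /* in probe(): devm_add_action_or_reset(dev, devm_foo_release, foo); */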
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 000000000000..c8a6db7c4498
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+struct nvme_dhchap_queue_context {
+ struct list_head entry;
+ struct work_struct auth_work;
+ struct nvme_ctrl *ctrl;
+ struct crypto_shash *shash_tfm;
+ struct crypto_kpp *dh_tfm;
+ void *buf;
+ size_t buf_size;
+ int qid;
+ int error;
+ u32 s1;
+ u32 s2;
+ u16 transaction;
+ u8 status;
+ u8 hash_id;
+ size_t hash_len;
+ u8 dhgroup_id;
+ u8 c1[64];
+ u8 c2[64];
+ u8 response[64];
+ u8 *host_response;
+ u8 *ctrl_key;
+ int ctrl_key_len;
+ u8 *host_key;
+ int host_key_len;
+ u8 *sess_key;
+ int sess_key_len;
+};
+
+#define nvme_auth_flags_from_qid(qid) \
+ ((qid) == 0 ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED)
+#define nvme_auth_queue_from_qid(ctrl, qid) \
+ ((qid) == 0 ? (ctrl)->fabrics_q : (ctrl)->connect_q)
+
+static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
+ void *data, size_t data_len, bool auth_send)
+{
+ struct nvme_command cmd = {};
+ blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
+ struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ int ret;
+
+ cmd.auth_common.opcode = nvme_fabrics_command;
+ cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+ cmd.auth_common.spsp0 = 0x01;
+ cmd.auth_common.spsp1 = 0x01;
+ if (auth_send) {
+ cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+ cmd.auth_send.tl = cpu_to_le32(data_len);
+ } else {
+ cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+ cmd.auth_receive.al = cpu_to_le32(data_len);
+ }
+
+ ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
+ qid == 0 ? NVME_QID_ANY : qid,
+ 0, flags);
+ if (ret > 0)
+ dev_warn(ctrl->device,
+ "qid %d auth_send failed with status %d\n", qid, ret);
+ else if (ret < 0)
+ dev_err(ctrl->device,
+ "qid %d auth_send failed with error %d\n", qid, ret);
+ return ret;
+}
+
+static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
+ struct nvmf_auth_dhchap_failure_data *data,
+ u16 transaction, u8 expected_msg)
+{
+ dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
+ __func__, qid, data->auth_type, data->auth_id);
+
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ return data->rescode_exp;
+ }
+ if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
+ data->auth_id != expected_msg) {
+ dev_warn(ctrl->device,
+ "qid %d invalid message %02x/%02x\n",
+ qid, data->auth_type, data->auth_id);
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ if (le16_to_cpu(data->t_id) != transaction) {
+ dev_warn(ctrl->device,
+ "qid %d invalid transaction ID %d\n",
+ qid, le16_to_cpu(data->t_id));
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
+ size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+ memset((u8 *)chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->sc_c = 0; /* No secure channel concatenation */
+ data->napd = 1;
+ data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
+ data->auth_protocol[0].dhchap.halen = 3;
+ data->auth_protocol[0].dhchap.dhlen = 6;
+ data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
+ data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
+ data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
+ data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
+ data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
+ data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
+ data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
+ data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
+ data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ size_t size = sizeof(*data) + data->hl + dhvlen;
+ const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
+ const char *hmac_name, *kpp_name;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ hmac_name = nvme_auth_hmac_name(data->hashid);
+ if (!hmac_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid HASH ID %d\n",
+ chap->qid, data->hashid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (chap->hash_id == data->hashid && chap->shash_tfm &&
+ !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
+ crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing hash %s\n",
+ chap->qid, hmac_name);
+ goto select_kpp;
+ }
+
+ /* Reset if hash cannot be reused */
+ if (chap->shash_tfm) {
+ crypto_free_shash(chap->shash_tfm);
+ chap->hash_id = 0;
+ chap->hash_len = 0;
+ }
+ chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(chap->shash_tfm)) {
+ dev_warn(ctrl->device,
+ "qid %d: failed to allocate hash %s, error %ld\n",
+ chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %d\n",
+ chap->qid, data->hl);
+ crypto_free_shash(chap->shash_tfm);
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Reset the host response if the hash has changed */
+ if (chap->hash_id != data->hashid) {
+ kfree(chap->host_response);
+ chap->host_response = NULL;
+ }
+
+ chap->hash_id = data->hashid;
+ chap->hash_len = data->hl;
+ dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
+ chap->qid, hmac_name);
+
+select_kpp:
+ kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
+ if (!kpp_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH group id %d\n",
+ chap->qid, data->dhgid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ /* Leave previous dh_tfm intact */
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Clear host and controller key to avoid accidental reuse */
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+
+ if (chap->dhgroup_id == data->dhgid &&
+ (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing DH group %s\n",
+ chap->qid, gid_name);
+ goto skip_kpp;
+ }
+
+ /* Reset dh_tfm if it can't be reused */
+ if (chap->dh_tfm) {
+ crypto_free_kpp(chap->dh_tfm);
+ chap->dh_tfm = NULL;
+ }
+
+ if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
+ if (dhvlen == 0) {
+ dev_warn(ctrl->device,
+ "qid %d: empty DH value\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
+ if (IS_ERR(chap->dh_tfm)) {
+ int ret = PTR_ERR(chap->dh_tfm);
+
+ dev_warn(ctrl->device,
+ "qid %d: error %d initializing DH group %s\n",
+ chap->qid, ret, gid_name);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ chap->dh_tfm = NULL;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
+ chap->qid, gid_name);
+ } else if (dhvlen != 0) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH value for NULL DH\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+ chap->dhgroup_id = data->dhgid;
+
+skip_kpp:
+ chap->s1 = le32_to_cpu(data->seqnum);
+ memcpy(chap->c1, data->cval, chap->hash_len);
+ if (dhvlen) {
+ chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
+ if (!chap->ctrl_key) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ chap->ctrl_key_len = dhvlen;
+ memcpy(chap->ctrl_key, data->cval + chap->hash_len,
+ dhvlen);
+ dev_dbg(ctrl->device, "ctrl public key %*ph\n",
+ (int)chap->ctrl_key_len, chap->ctrl_key);
+ }
+
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_reply_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ size += 2 * chap->hash_len;
+
+ if (chap->host_key_len)
+ size += chap->host_key_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->hl = chap->hash_len;
+ data->dhvlen = cpu_to_le16(chap->host_key_len);
+ memcpy(data->rval, chap->response, chap->hash_len);
+ if (ctrl->ctrl_key) {
+ get_random_bytes(chap->c2, chap->hash_len);
+ data->cvalid = 1;
+ chap->s2 = nvme_auth_get_seqnum();
+ memcpy(data->rval + chap->hash_len, chap->c2,
+ chap->hash_len);
+ dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, chap->c2);
+ } else {
+ memset(chap->c2, 0, chap->hash_len);
+ chap->s2 = 0;
+ }
+ data->seqnum = cpu_to_le32(chap->s2);
+ if (chap->host_key_len) {
+ dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
+ __func__, chap->qid,
+ chap->host_key_len, chap->host_key);
+ memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
+ chap->host_key_len);
+ }
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ if (ctrl->ctrl_key)
+ size += chap->hash_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (data->hl != chap->hash_len) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %u\n",
+ chap->qid, data->hl);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: authenticated with hash %s dhgroup %s\n",
+ nvme_auth_hmac_name(chap->hash_id),
+ nvme_auth_dhgroup_name(chap->dhgroup_id));
+
+ if (!data->rvalid)
+ return 0;
+
+ /* Validate controller response */
+ if (memcmp(chap->response, data->rval, data->hl)) {
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, data->rval);
+ dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len,
+ chap->response);
+ dev_warn(ctrl->device,
+ "qid %d: controller authentication failed\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: controller authenticated\n");
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success2_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ data->t_id = cpu_to_le16(chap->transaction);
+
+ return size;
+}
+
+static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_failure_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = chap->status;
+
+ return size;
+}
+
+static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 buf[4], *challenge = chap->c1;
+ int ret;
+
+ dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s1, chap->transaction);
+
+ if (!chap->host_response) {
+ chap->host_response = nvme_auth_transform_key(ctrl->host_key,
+ ctrl->opts->host->nqn);
+ if (IS_ERR(chap->host_response)) {
+ ret = PTR_ERR(chap->host_response);
+ chap->host_response = NULL;
+ return ret;
+ }
+ } else {
+ dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
+ __func__, chap->qid);
+ }
+
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ chap->host_response, ctrl->host_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, sizeof(buf));
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c1)
+ kfree(challenge);
+ return ret;
+}
+
+static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 *ctrl_response;
+ u8 buf[4], *challenge = chap->c2;
+ int ret;
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->opts->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ return ret;
+ }
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ ctrl_response, ctrl->ctrl_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c2, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s2, chap->transaction);
+ dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, challenge);
+ dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
+ __func__, chap->qid, ctrl->opts->subsysnqn);
+ dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
+ __func__, chap->qid, ctrl->opts->host->nqn);
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c2)
+ kfree(challenge);
+ kfree(ctrl_response);
+ return ret;
+}
+
+static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ int ret;
+
+ if (chap->host_key && chap->host_key_len) {
+ dev_dbg(ctrl->device,
+ "qid %d: reusing host key\n", chap->qid);
+ goto gen_sesskey;
+ }
+ ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
+ if (ret < 0) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+ chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);
+
+ chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
+ if (!chap->host_key) {
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(chap->dh_tfm,
+ chap->host_key, chap->host_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate public key, error %d\n", ret);
+ kfree(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+gen_sesskey:
+ chap->sess_key_len = chap->host_key_len;
+ chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
+ if (!chap->sess_key) {
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+
+ ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
+ chap->ctrl_key, chap->ctrl_key_len,
+ chap->sess_key, chap->sess_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate shared secret, error %d\n", ret);
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+ dev_dbg(ctrl->device, "shared secret %*ph\n",
+ (int)chap->sess_key_len, chap->sess_key);
+ return 0;
+}
+
+static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+{
+ kfree_sensitive(chap->host_response);
+ chap->host_response = NULL;
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = 0;
+ chap->error = 0;
+ chap->s1 = 0;
+ chap->s2 = 0;
+ chap->transaction = 0;
+ memset(chap->c1, 0, sizeof(chap->c1));
+ memset(chap->c2, 0, sizeof(chap->c2));
+}
+
+static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+{
+ __nvme_auth_reset(chap);
+ if (chap->shash_tfm)
+ crypto_free_shash(chap->shash_tfm);
+ if (chap->dh_tfm)
+ crypto_free_kpp(chap->dh_tfm);
+ kfree_sensitive(chap->ctrl_key);
+ kfree_sensitive(chap->host_key);
+ kfree_sensitive(chap->sess_key);
+ kfree_sensitive(chap->host_response);
+ kfree(chap->buf);
+ kfree(chap);
+}
+
+static void __nvme_auth_work(struct work_struct *work)
+{
+ struct nvme_dhchap_queue_context *chap =
+ container_of(work, struct nvme_dhchap_queue_context, auth_work);
+ struct nvme_ctrl *ctrl = chap->ctrl;
+ size_t tl;
+ int ret = 0;
+
+ chap->transaction = ctrl->transaction++;
+
+ /* DH-HMAC-CHAP Step 1: send negotiate */
+ dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ return;
+ }
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ return;
+ }
+
+ /* DH-HMAC-CHAP Step 2: receive challenge */
+ dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive challenge, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
+ if (ret) {
+ /* Invalid challenge parameters */
+ chap->error = ret;
+ goto fail2;
+ }
+
+ if (chap->ctrl_key_len) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d DH exponential\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_exponential(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ dev_dbg(ctrl->device, "%s: qid %d host response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 3: send reply */
+ dev_dbg(ctrl->device, "%s: qid %d send reply\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 4: receive success1 */
+ dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive success1, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid,
+ chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ if (ctrl->ctrl_key) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d controller response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ ret = nvme_auth_process_dhchap_success1(ctrl, chap);
+ if (ret) {
+ /* Controller authentication failed */
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ goto fail2;
+ }
+
+ if (ctrl->ctrl_key) {
+ /* DH-HMAC-CHAP Step 5: send success2 */
+ dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+ __func__, chap->qid);
+ tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret)
+ chap->error = ret;
+ }
+ if (!ret) {
+ chap->error = 0;
+ return;
+ }
+
+fail2:
+ dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ __func__, chap->qid, chap->status);
+ tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ /*
+ * Only update the error if sending failure2 failed and no other
+ * error was set during authentication.
+ */
+ if (ret && !chap->error)
+ chap->error = ret;
+}
+
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ if (!ctrl->host_key) {
+ dev_warn(ctrl->device, "qid %d: no key\n", qid);
+ return -ENOKEY;
+ }
+
+ if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
+ dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
+ return -ENOKEY;
+ }
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ /* Check if the context is already queued */
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ WARN_ON(!chap->buf);
+ if (chap->qid == qid) {
+ dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+ }
+ }
+ chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+ if (!chap) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENOMEM;
+ }
+ chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
+ chap->ctrl = ctrl;
+
+ /*
+ * Allocate a buffer large enough for the entire negotiation:
+ * 4k is sufficient even for ffdhe8192.
+ */
+ chap->buf_size = 4096;
+ chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
+ if (!chap->buf) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ kfree(chap);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&chap->auth_work, __nvme_auth_work);
+ list_add(&chap->entry, &ctrl->dhchap_auth_list);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
+
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+ int ret;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ if (chap->qid != qid)
+ continue;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ return ret;
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_wait);
+
+void nvme_auth_reset(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_reset);
+
+static void nvme_dhchap_auth_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, dhchap_auth_work);
+ int ret, q;
+
+ /* Authenticate admin queue first */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: error %d setting up authentication\n", ret);
+ return;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ return;
+ }
+
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_negotiate(ctrl, q);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: error %d setting up authentication\n",
+ q, ret);
+ break;
+ }
+ }
+
+ /*
+ * Failure is a soft state; credentials remain valid until
+ * the controller terminates the connection.
+ */
+}
+
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+ INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+ mutex_init(&ctrl->dhchap_auth_mutex);
+ if (!ctrl->opts)
+ return;
+ nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
+ nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+
+void nvme_auth_stop(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ cancel_work_sync(&ctrl->dhchap_auth_work);
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+ cancel_work_sync(&chap->auth_work);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_stop);
+
+void nvme_auth_free(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
+ list_del_init(&chap->entry);
+ flush_work(&chap->auth_work);
+ __nvme_auth_free(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free);
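Putting the pieces of __nvme_auth_work() together, the per-queue exchange runs as follows (a hedged summary; the bidirectional branch applies only when a controller key is configured):

    /*
     *   host                                  controller
     *   -- negotiate (hash/dhgroup lists) -->            step 1
     *   <-- challenge (hashid, dhgid, c1, s1) --         step 2
     *   -- reply (response, c2/s2, DH pubkey) -->        step 3
     *   <-- success1 (optional rval over c2) --          step 4
     *   -- success2 -->  (only with a ctrl key)          step 5
     *
     * Any failure short-circuits to failure2 carrying chap->status.
     */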
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 4910543f00ff..e958d5015585 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -6,7 +6,6 @@
#include "nvme.h"
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
static const char * const nvme_ops[] = {
[nvme_cmd_flush] = "Flush",
[nvme_cmd_write] = "Write",
@@ -178,6 +177,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
return nvme_ops[opcode];
return "Unknown";
}
+EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
@@ -185,4 +185,3 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
return nvme_admin_ops[opcode];
return "Unknown";
}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2533b88e66d5..af367b22871b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -24,12 +24,22 @@
#include "nvme.h"
#include "fabrics.h"
+#include <linux/nvme-auth.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#define NVME_MINORS (1U << MINORBITS)
+struct nvme_ns_info {
+ struct nvme_ns_ids ids;
+ u32 nsid;
+ __le32 anagrpid;
+ bool is_shared;
+ bool is_readonly;
+ bool is_ready;
+};
+
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
@@ -330,6 +340,7 @@ enum nvme_disposition {
COMPLETE,
RETRY,
FAILOVER,
+ AUTHENTICATE,
};
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -337,6 +348,9 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
if (blk_noretry_request(req) ||
(nvme_req(req)->status & NVME_SC_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
@@ -375,11 +389,13 @@ static inline void nvme_end_req(struct request *req)
void nvme_complete_rq(struct request *req)
{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+ if (ctrl->kas)
+ ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
case COMPLETE:
@@ -391,6 +407,14 @@ void nvme_complete_rq(struct request *req)
case FAILOVER:
nvme_failover_req(req);
return;
+ case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+ nvme_retry_req(req);
+#else
+ nvme_end_req(req);
+#endif
+ return;
}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -702,7 +726,9 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
- req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
return true;
break;
default:
@@ -990,8 +1016,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags)
+ int qid, int at_head, blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1000,15 +1025,12 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
+ qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
- if (timeout)
- req->timeout = timeout;
-
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
if (ret)
@@ -1028,7 +1050,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1329,8 +1351,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
}
}
-static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
struct nvme_command c = { };
bool csi_seen = false;
@@ -1343,7 +1365,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
return 0;
c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.nsid = cpu_to_le32(info->nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
@@ -1355,7 +1377,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
- nsid, status);
+ info->nsid, status);
goto free_data;
}
@@ -1365,7 +1387,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
+ len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
if (len < 0)
break;
@@ -1374,7 +1396,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (nvme_multi_css(ctrl) && !csi_seen) {
dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
- nsid);
+ info->nsid);
status = -EINVAL;
}
@@ -1384,7 +1406,7 @@ free_data:
}
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids, struct nvme_id_ns **id)
+ struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1407,51 +1429,66 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
error = NVME_SC_INVALID_NS | NVME_SC_DNR;
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
+ return 0;
+out_free_id:
+ kfree(*id);
+ return error;
+}
+static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_ids *ids = &info->ids;
+ struct nvme_id_ns *id;
+ int ret;
+
+ ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = true;
if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
dev_info(ctrl->device,
"Ignoring bogus Namespace Identifiers\n");
} else {
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0) &&
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
-
+ kfree(id);
return 0;
-
-out_free_id:
- kfree(*id);
- return error;
}
-static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_id_ns_cs_indep **id)
+static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
+ struct nvme_id_ns_cs_indep *id;
struct nvme_command c = {
.identify.opcode = nvme_admin_identify,
- .identify.nsid = cpu_to_le32(nsid),
+ .identify.nsid = cpu_to_le32(info->nsid),
.identify.cns = NVME_ID_CNS_NS_CS_INDEP,
};
int ret;
- *id = kmalloc(sizeof(**id), GFP_KERNEL);
- if (!*id)
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
return -ENOMEM;
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
- if (ret) {
- dev_warn(ctrl->device,
- "Identify namespace (CS independent) failed (%d)\n",
- ret);
- kfree(*id);
- return ret;
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (!ret) {
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = id->nstat & NVME_NSTAT_NRDY;
}
-
- return 0;
+ kfree(id);
+ return ret;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
@@ -1466,7 +1503,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1875,6 +1912,11 @@ static void nvme_update_disk_info(struct gendisk *disk,
ns->ctrl->max_zeroes_sectors);
}
+static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
+}
+
static inline bool nvme_first_scan(struct gendisk *disk)
{
/* nvme_alloc_ns() scans the disk prior to adding it */
@@ -1912,12 +1954,44 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_queue_chunk_sectors(ns->queue, iob);
}
-static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_update_ns_info_generic(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
{
- unsigned lbaf = nvme_lbaf_index(id->flbas);
+ blk_mq_freeze_queue(ns->disk->queue);
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_mq_unfreeze_queue(ns->disk->queue);
+
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ /* Hide the block-interface for these devices */
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+
+ return 0;
+}
+
+static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns *id;
+ unsigned lbaf;
int ret;
+ ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+
blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
@@ -1927,36 +2001,35 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
- if (ret)
- goto out_unfreeze;
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
}
- set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- return ret;
+ goto out;
}
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
- set_disk_ro(ns->head->disk,
- (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
- return 0;
-out_unfreeze:
+ ret = 0;
+out:
/*
* If probing fails due to an unsupported feature, hide the block device,
* but still allow other access.
@@ -1966,10 +2039,31 @@ out_unfreeze:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
- blk_mq_unfreeze_queue(ns->disk->queue);
+ kfree(id);
return ret;
}
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ switch (info->ids.csi) {
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ info->nsid);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+ return nvme_update_ns_info_block(ns, info);
+ case NVME_CSI_NVM:
+ return nvme_update_ns_info_block(ns, info);
+ default:
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported (csi %u)\n",
+ info->nsid, info->ids.csi);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+}
+
static char nvme_pr_type(enum pr_type type)
{
switch (type) {
@@ -2103,7 +2197,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
@@ -2123,6 +2217,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector,
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -3613,6 +3708,108 @@ static ssize_t dctype_show(struct device *dev,
}
static DEVICE_ATTR_RO(dctype);
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
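/*
 * A hedged usage sketch for the two attributes above: re-keying a live
 * controller from userspace. The device path and the secret string are
 * hypothetical examples, not values from this patch.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical controller instance */
	const char *attr = "/sys/class/nvme/nvme0/dhchap_secret";
	/* must start with "DHHC-1:" or the store handler returns -EINVAL */
	const char *key = "DHHC-1:00:aGVkZ2VkIGV4YW1wbGUgb25seQo=:";
	int fd = open(attr, O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, key, strlen(key)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}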
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3636,6 +3833,10 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
NULL
};
@@ -3659,6 +3860,12 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
return a->mode;
}
@@ -3786,7 +3993,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_ns_ids *ids, bool is_shared)
+ struct nvme_ns_info *info)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3808,9 +4015,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
if (ret)
goto out_ida_remove;
head->subsys = ctrl->subsys;
- head->ns_id = nsid;
- head->ids = *ids;
- head->shared = is_shared;
+ head->ns_id = info->nsid;
+ head->ids = info->ids;
+ head->shared = info->is_shared;
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3867,54 +4074,54 @@ static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
return ret;
}
-static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_ns_ids *ids, bool is_shared)
+static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL;
int ret;
- ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
- "globally duplicate IDs for nsid %d\n", nsid);
+ "globally duplicate IDs for nsid %d\n", info->nsid);
nvme_print_device_info(ctrl);
return ret;
}
mutex_lock(&ctrl->subsys->lock);
- head = nvme_find_ns_head(ctrl, nsid);
+ head = nvme_find_ns_head(ctrl, info->nsid);
if (!head) {
- ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs in subsystem for nsid %d\n",
- nsid);
+ info->nsid);
goto out_unlock;
}
- head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared);
+ head = nvme_alloc_ns_head(ctrl, info);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
} else {
ret = -EINVAL;
- if (!is_shared || !head->shared) {
+ if (!info->is_shared || !head->shared) {
dev_err(ctrl->device,
- "Duplicate unshared namespace %d\n", nsid);
+ "Duplicate unshared namespace %d\n",
+ info->nsid);
goto out_put_ns_head;
}
- if (!nvme_ns_ids_equal(&head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
- nsid);
+ info->nsid);
goto out_put_ns_head;
}
if (!multipath && !list_empty(&head->list)) {
dev_warn(ctrl->device,
"Found shared namespace %d, but multipathing not supported.\n",
- nsid);
+ info->nsid);
dev_warn_once(ctrl->device,
"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
}
@@ -3968,20 +4175,15 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_add(&ns->list, &ns->ctrl->namespaces);
}
-static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
struct nvme_ns *ns;
struct gendisk *disk;
- struct nvme_id_ns *id;
int node = ctrl->numa_node;
- if (nvme_identify_ns(ctrl, nsid, ids, &id))
- return;
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- goto out_free_id;
+ return;
disk = blk_mq_alloc_disk(ctrl->tagset, ns);
if (IS_ERR(disk))
@@ -3996,13 +4198,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
- if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
ns->ctrl = ctrl;
kref_init(&ns->kref);
- if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
+ if (nvme_init_ns_head(ns, info))
goto out_cleanup_disk;
/*
@@ -4028,7 +4231,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->head->instance);
}
- if (nvme_update_ns_info(ns, id))
+ if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
@@ -4042,9 +4245,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
- nvme_mpath_add_disk(ns, id);
+ nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
- kfree(id);
return;
@@ -4064,8 +4266,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
put_disk(disk);
out_free_ns:
kfree(ns);
- out_free_id:
- kfree(id);
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -4123,29 +4323,21 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
}
}
-static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
- struct nvme_id_ns *id;
int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
if (test_bit(NVME_NS_DEAD, &ns->flags))
goto out;
- ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
- if (ret)
- goto out;
-
ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
- if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
dev_err(ns->ctrl->device,
"identifiers changed for nsid %d\n", ns->head->ns_id);
- goto out_free_id;
+ goto out;
}
- ret = nvme_update_ns_info(ns, id);
-
-out_free_id:
- kfree(id);
+ ret = nvme_update_ns_info(ns, info);
out:
/*
* Only remove the namespace if we got a fatal error back from the
@@ -4157,59 +4349,47 @@ out:
nvme_ns_remove(ns);
}
-static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns_ids ids = { };
- struct nvme_id_ns_cs_indep *id;
+ struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
- bool ready = true;
- if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+ if (nvme_identify_ns_descs(ctrl, &info))
return;
+ if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n", nsid);
+ return;
+ }
+
/*
- * Check if the namespace is ready. If not ignore it, we will get an
- * AEN once it becomes ready and restart the scan.
+ * If available, try to use the Command Set Independent Identify Namespace
+ * data structure to find all the generic information that is needed to
+ * set up a namespace. If not, fall back to the legacy version.
*/
- if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) &&
- !nvme_identify_ns_cs_indep(ctrl, nsid, &id)) {
- ready = id->nstat & NVME_NSTAT_NRDY;
- kfree(id);
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+ if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+ return;
+ } else {
+ if (nvme_ns_info_from_identify(ctrl, &info))
+ return;
}
- if (!ready)
+ /*
+ * Ignore the namespace if it is not ready. We will get an AEN once it
+ * becomes ready and restart the scan.
+ */
+ if (!info.is_ready)
return;
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- nvme_validate_ns(ns, &ids);
+ nvme_validate_ns(ns, &info);
nvme_put_ns(ns);
- return;
- }
-
- switch (ids.csi) {
- case NVME_CSI_NVM:
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- case NVME_CSI_ZNS:
- if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
- dev_warn(ctrl->device,
- "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
- nsid);
- break;
- }
- if (!nvme_multi_css(ctrl)) {
- dev_warn(ctrl->device,
- "command set not reported for nsid: %d\n",
- nsid);
- break;
- }
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- default:
- dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
- ids.csi, nsid);
- break;
+ } else {
+ nvme_alloc_ns(ctrl, &info);
}
}
@@ -4265,7 +4445,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
if (!nsid) /* end of the list? */
goto out;
- nvme_validate_or_alloc_ns(ctrl, nsid);
+ nvme_scan_ns(ctrl, nsid);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
@@ -4288,7 +4468,7 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_or_alloc_ns(ctrl, i);
+ nvme_scan_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -4525,9 +4705,19 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
+static u32 nvme_aer_type(u32 result)
+{
+ return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
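/*
 * A small sketch of the completion dword layout the two helpers above
 * decode: event type in bits 2:0 and event information (subtype) in
 * bits 15:8, per the NVMe Asynchronous Event definition.
 */
#include <stdint.h>

struct aer_fields {
	uint32_t type;		/* e.g. NVME_AER_NOTICE, NVME_AER_ERROR */
	uint32_t subtype;	/* e.g. persistent internal error */
};

static struct aer_fields decode_aer(uint32_t result)
{
	return (struct aer_fields){
		.type = result & 0x7,
		.subtype = (result & 0xff00) >> 8,
	};
}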
+
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ u32 aer_notice_type = nvme_aer_subtype(result);
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -4542,8 +4732,10 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* recovery actions from interfering with the controller's
* firmware activation.
*/
- if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ nvme_auth_stop(ctrl);
queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -4560,11 +4752,19 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
}
}
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
@@ -4574,6 +4774,15 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
@@ -4590,6 +4799,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
@@ -4649,6 +4859,8 @@ static void nvme_free_ctrl(struct device *dev)
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
if (subsys) {
@@ -4739,6 +4951,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
nvme_mpath_init_ctrl(ctrl);
+ nvme_auth_init_ctrl(ctrl);
return 0;
out_free_name:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..10cc4a814602 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,7 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -198,7 +198,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -243,7 +243,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
@@ -270,6 +270,12 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
{
int err_sctype = errval & ~NVME_SC_DNR;
+ if (errval < 0) {
+ dev_err(ctrl->device,
+ "Connect command failed, errno: %d\n", errval);
+ return;
+ }
+
switch (err_sctype) {
case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) {
@@ -331,6 +337,10 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
dev_err(ctrl->device,
"Connect command failed: host path error\n");
break;
+ case NVME_SC_AUTH_REQUIRED:
+ dev_err(ctrl->device,
+ "Connect command failed: authentication required\n");
+ break;
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
@@ -365,6 +375,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -389,7 +400,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
@@ -397,8 +408,25 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
goto out_free_data;
}
- ctrl->cntlid = le16_to_cpu(res.u16);
-
+ result = le32_to_cpu(res.u32);
+ ctrl->cntlid = result & 0xFFFF;
+ if ((result >> 16) & 0x3) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication setup failed\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ else
+ dev_info(ctrl->device,
+ "qid 0: authenticated\n");
+ }
out_free_data:
kfree(data);
return ret;
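/*
 * Sketch of how the Connect response dword is consumed above: CNTLID in
 * the low 16 bits, and the AUTHREQ flags (read here as bits 17:16)
 * deciding whether DH-CHAP negotiation must follow the Connect.
 */
#include <stdint.h>

static inline uint16_t connect_cntlid(uint32_t result)
{
	return result & 0xffff;
}

static inline int connect_authreq(uint32_t result)
{
	return (result >> 16) & 0x3;	/* non-zero: authentication required */
}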
@@ -431,6 +459,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -450,12 +479,27 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 0, qid, 1,
+ data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
+ result = le32_to_cpu(res.u32);
+ if ((result >> 16) & 2) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: authentication setup failed\n", qid);
+ ret = NVME_SC_AUTH_REQUIRED;
+ } else {
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed\n", qid);
+ }
+ }
kfree(data);
return ret;
}
@@ -548,6 +592,8 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -829,6 +875,34 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
+ case NVMF_OPT_DHCHAP_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = p;
+ break;
+ case NVMF_OPT_DHCHAP_CTRL_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = p;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -947,6 +1021,8 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
kfree(opts->host_iface);
+ kfree(opts->dhchap_secret);
+ kfree(opts->dhchap_ctrl_secret);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
@@ -956,7 +1032,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
- NVMF_OPT_FAIL_FAST_TMO)
+ NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
+ NVMF_OPT_DHCHAP_CTRL_SECRET)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
@@ -1159,7 +1236,7 @@ static int __init nvmf_init(void)
nvmf_device =
device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(nvmf_device)) {
- pr_err("couldn't create nvme-fabris device!\n");
+ pr_err("couldn't create nvme-fabrics device!\n");
ret = PTR_ERR(nvmf_device);
goto out_destroy_class;
}
@@ -1192,7 +1269,14 @@ static void __exit nvmf_exit(void)
BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..a6e22116e139 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,8 @@ enum {
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
+ NVMF_OPT_DHCHAP_SECRET = 1 << 23,
+ NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
};
/**
@@ -97,6 +99,9 @@ enum {
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ * authentication
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -121,6 +126,8 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
+ char *dhchap_secret;
+ char *dhchap_ctrl_secret;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9987797620b6..127abaf9ba5d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2533,6 +2533,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ if (start_queues)
+ nvme_start_admin_queue(&ctrl->ctrl);
}
static void
@@ -3878,6 +3880,7 @@ static int fc_parse_cgrpid(const char *buf, u64 *id)
static ssize_t fc_appid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
+ size_t orig_count = count;
u64 cgrp_id;
int appid_len = 0;
int cgrpid_len = 0;
@@ -3902,7 +3905,7 @@ static ssize_t fc_appid_store(struct device *dev,
ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
if (ret < 0)
return ret;
- return count;
+ return orig_count;
}
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
#endif /* CONFIG_BLK_CGROUP_FC_APPID */
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f26640ccb955..6ef497c75a16 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -346,7 +346,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
* different queue via blk_steal_bios(), so we need to use the bio_split
* pool from the original queue to allocate the bvecs from.
*/
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
@@ -408,6 +408,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = nvme_getgeo,
.report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
@@ -800,16 +801,16 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
return -ENXIO; /* just break out of the loop */
}
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
struct nvme_ana_group_desc desc = {
- .grpid = id->anagrpid,
+ .grpid = anagrpid,
.state = 0,
};
mutex_lock(&ns->ctrl->ana_lock);
- ns->ana_grpid = le32_to_cpu(id->anagrpid);
+ ns->ana_grpid = le32_to_cpu(anagrpid);
nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
if (desc.state) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e0a925bf3be..1bdf714dcd9e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -140,7 +140,7 @@ enum nvme_quirks {
NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
/*
- * The controller requires the command_id value be be limited, so skip
+ * The controller requires the command_id value be limited, so skip
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
@@ -328,6 +328,15 @@ struct nvme_ctrl {
struct work_struct ana_work;
#endif
+#ifdef CONFIG_NVME_AUTH
+ struct work_struct dhchap_auth_work;
+ struct list_head dhchap_auth_list;
+ struct mutex dhchap_auth_mutex;
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u16 transaction;
+#endif
+
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -495,7 +504,6 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
-#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -505,6 +513,7 @@ struct nvme_ctrl_ops {
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
void (*print_device_info)(struct nvme_ctrl *ctrl);
+ bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};
/*
@@ -781,7 +790,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
+ int qid, int at_head,
blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
@@ -837,7 +846,7 @@ void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
@@ -879,8 +888,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
{
return 0;
}
-static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
- struct nvme_id_ns *id)
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -992,6 +1000,27 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1));
}
+#ifdef CONFIG_NVME_AUTH
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_auth_stop(struct nvme_ctrl *ctrl);
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+void nvme_auth_reset(struct nvme_ctrl *ctrl);
+void nvme_auth_free(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {}
+static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
+static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ return -EPROTONOSUPPORT;
+}
+static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ return NVME_SC_AUTH_REQUIRED;
+}
+static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
+#endif
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7e7d4802ac6b..3a1c37f32f30 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -230,11 +230,10 @@ struct nvme_iod {
bool use_sgl;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
- int nents; /* Used in scatterlist */
dma_addr_t first_dma;
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t meta_dma;
- struct scatterlist *sg;
+ struct sg_table sgt;
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -524,7 +523,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
static void **nvme_pci_iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+ return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
}
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,17 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
}
}
-static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-}
-
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -597,9 +585,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
return;
}
- WARN_ON_ONCE(!iod->nents);
+ WARN_ON_ONCE(!iod->sgt.nents);
+
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- nvme_unmap_sg(dev, req);
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
iod->first_dma);
@@ -607,7 +596,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
nvme_free_sgls(dev, req);
else
nvme_free_prps(dev, req);
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -630,7 +619,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
@@ -670,7 +659,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->first_dma = dma_addr;
iod->npages = -1;
return BLK_STS_RESOURCE;
}
@@ -703,16 +691,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_len = sg_dma_len(sg);
}
done:
- cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
return BLK_STS_OK;
free_prps:
nvme_free_prps(dev, req);
return BLK_STS_RESOURCE;
bad_sgl:
- WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+ WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
"Invalid SGL for payload:%d nents:%d\n",
- blk_rq_payload_bytes(req), iod->nents);
+ blk_rq_payload_bytes(req), iod->sgt.nents);
return BLK_STS_IOERR;
}
@@ -738,12 +726,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd, int entries)
+ struct request *req, struct nvme_rw_command *cmd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
+ unsigned int entries = iod->sgt.nents;
dma_addr_t sgl_dma;
int i = 0;
@@ -841,7 +830,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
- int nr_mapped;
+ int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
struct bio_vec bv = req_bvec(req);
@@ -859,26 +848,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
}
iod->dma_len = 0;
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
+ iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
- if (!iod->nents)
+ sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+ iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ if (!iod->sgt.orig_nents)
goto out_free_sg;
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
- iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
- else
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req), DMA_ATTR_NO_WARN);
- if (!nr_mapped)
+ rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+ DMA_ATTR_NO_WARN);
+ if (rc) {
+ if (rc == -EREMOTEIO)
+ ret = BLK_STS_TARGET;
goto out_free_sg;
+ }
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
if (ret != BLK_STS_OK)
@@ -886,9 +874,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return BLK_STS_OK;
out_unmap_sg:
- nvme_unmap_sg(dev, req);
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
return ret;
}
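/*
 * A condensed sketch of the sg_table pattern adopted above:
 * dma_map_sgtable() fills sgt->nents with the mapped count and handles
 * P2PDMA pages internally, which is what lets the open-coded
 * pci_p2pdma_map_sg_attrs()/dma_map_sg_attrs() split be dropped.
 * The helper name is invented for illustration.
 */
#include <linux/blk_types.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static blk_status_t map_payload(struct device *dma_dev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int rc = dma_map_sgtable(dma_dev, sgt, dir, DMA_ATTR_NO_WARN);

	if (rc == -EREMOTEIO)		/* unmappable peer-to-peer page */
		return BLK_STS_TARGET;
	if (rc)
		return BLK_STS_RESOURCE;
	/* ... build PRPs or SGLs from sgt->sgl over sgt->nents entries ... */
	dma_unmap_sgtable(dma_dev, sgt, dir, 0);
	return BLK_STS_OK;
}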
@@ -912,7 +900,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
iod->aborted = 0;
iod->npages = -1;
- iod->nents = 0;
+ iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
@@ -1435,8 +1423,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d QID %d timeout, aborting\n",
- req->tag, nvmeq->qid);
+ "I/O %d (%s) QID %d timeout, aborting\n",
+ req->tag,
+ nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
+ nvmeq->qid);
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -1765,37 +1755,35 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
}
}
-static int nvme_alloc_admin_tags(struct nvme_dev *dev)
+static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev)
{
- if (!dev->ctrl.admin_q) {
- dev->admin_tagset.ops = &nvme_mq_admin_ops;
- dev->admin_tagset.nr_hw_queues = 1;
+ struct blk_mq_tag_set *set = &dev->admin_tagset;
- dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev->ctrl.numa_node;
- dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
- dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
- dev->admin_tagset.driver_data = dev;
+ set->ops = &nvme_mq_admin_ops;
+ set->nr_hw_queues = 1;
- if (blk_mq_alloc_tag_set(&dev->admin_tagset))
- return -ENOMEM;
- dev->ctrl.admin_tagset = &dev->admin_tagset;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_NO_SCHED;
+ set->driver_data = dev;
- dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (IS_ERR(dev->ctrl.admin_q)) {
- blk_mq_free_tag_set(&dev->admin_tagset);
- dev->ctrl.admin_q = NULL;
- return -ENOMEM;
- }
- if (!blk_get_queue(dev->ctrl.admin_q)) {
- nvme_dev_remove_admin(dev);
- dev->ctrl.admin_q = NULL;
- return -ENODEV;
- }
- } else
- nvme_start_admin_queue(&dev->ctrl);
+ if (blk_mq_alloc_tag_set(set))
+ return -ENOMEM;
+ dev->ctrl.admin_tagset = set;
+ dev->ctrl.admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(dev->ctrl.admin_q)) {
+ blk_mq_free_tag_set(set);
+ dev->ctrl.admin_q = NULL;
+ return -ENOMEM;
+ }
+ if (!blk_get_queue(dev->ctrl.admin_q)) {
+ nvme_dev_remove_admin(dev);
+ dev->ctrl.admin_q = NULL;
+ return -ENODEV;
+ }
return 0;
}
@@ -2534,47 +2522,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
return true;
}
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
{
+ struct blk_mq_tag_set *set = &dev->tagset;
int ret;
- if (!dev->ctrl.tagset) {
- dev->tagset.ops = &nvme_mq_ops;
- dev->tagset.nr_hw_queues = dev->online_queues - 1;
- dev->tagset.nr_maps = 2; /* default + read */
- if (dev->io_queues[HCTX_TYPE_POLL])
- dev->tagset.nr_maps++;
- dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev->ctrl.numa_node;
- dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
- BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = sizeof(struct nvme_iod);
- dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
- dev->tagset.driver_data = dev;
-
- /*
- * Some Apple controllers requires tags to be unique
- * across admin and IO queue, so reserve the first 32
- * tags of the IO queue.
- */
- if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
- dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+ set->ops = &nvme_mq_ops;
+ set->nr_hw_queues = dev->online_queues - 1;
+ set->nr_maps = 2; /* default + read */
+ if (dev->io_queues[HCTX_TYPE_POLL])
+ set->nr_maps++;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->queue_depth = min_t(unsigned int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->driver_data = dev;
- ret = blk_mq_alloc_tag_set(&dev->tagset);
- if (ret) {
- dev_warn(dev->ctrl.device,
- "IO queues tagset allocation failed %d\n", ret);
- return;
- }
- dev->ctrl.tagset = &dev->tagset;
- } else {
- blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /*
+ * Some Apple controllers require tags to be unique
+ * across admin and IO queue, so reserve the first 32
+ * tags of the IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ set->reserved_tags = NVME_AQ_DEPTH;
- /* Free previously allocated queues that are no longer usable */
- nvme_free_queues(dev, dev->online_queues);
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return;
}
+ dev->ctrl.tagset = set;
+}
- nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
}
static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2725,10 +2711,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_pci_disable(dev);
nvme_reap_pending_cqes(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_wait_completed_request(&dev->tagset);
- blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
+ nvme_cancel_tagset(&dev->ctrl);
+ nvme_cancel_admin_tagset(&dev->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -2842,9 +2826,13 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out_unlock;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto out_unlock;
+ if (!dev->ctrl.admin_q) {
+ result = nvme_pci_alloc_admin_tag_set(dev);
+ if (result)
+ goto out_unlock;
+ } else {
+ nvme_start_admin_queue(&dev->ctrl);
+ }
/*
* Limit the max command size to prevent iod->sg allocations going
@@ -2923,7 +2911,11 @@ static void nvme_reset_work(struct work_struct *work)
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ if (!dev->ctrl.tagset)
+ nvme_pci_alloc_tag_set(dev);
+ else
+ nvme_pci_update_nr_queues(dev);
+ nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
}
@@ -2989,7 +2981,6 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
-
static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
@@ -3004,11 +2995,17 @@ static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
subsys->firmware_rev);
}
+static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ return dma_pci_p2pdma_supported(dev->dev);
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED |
- NVME_F_PCI_P2PDMA,
+ .flags = NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -3016,6 +3013,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.submit_async_event = nvme_pci_submit_async_event,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
+ .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -3513,6 +3511,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4665aebd944d..3100643be299 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -29,7 +29,7 @@
#include "fabrics.h"
-#define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 second */
+#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 seconds */
#define NVME_RDMA_MAX_SEGMENTS 256
@@ -248,12 +248,9 @@ static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
int ret;
- ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
- msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
- if (ret < 0)
+ ret = wait_for_completion_interruptible(&queue->cm_done);
+ if (ret)
return ret;
- if (ret == 0)
- return -ETIMEDOUT;
WARN_ON_ONCE(queue->cm_error > 0);
return queue->cm_error;
}
@@ -612,7 +609,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue->cm_error = -ETIMEDOUT;
ret = rdma_resolve_addr(queue->cm_id, src_addr,
(struct sockaddr *)&ctrl->addr,
- NVME_RDMA_CONNECT_TIMEOUT_MS);
+ NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_info(ctrl->ctrl.device,
"rdma_resolve_addr failed (%d).\n", ret);
@@ -790,50 +787,54 @@ out_free_queues:
return ret;
}
-static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- set->flags = BLK_MQ_F_NO_SCHED;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- if (nctrl->max_integrity_segments)
- set->cmd_size += sizeof(struct nvme_rdma_sgl) +
- NVME_RDMA_METADATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->flags = BLK_MQ_F_NO_SCHED;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ ctrl->ctrl.admin_tagset = set;
+ return ret;
+}
+
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
- return set;
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ if (nctrl->max_integrity_segments)
+ set->cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ ctrl->ctrl.tagset = set;
+ return ret;
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
@@ -885,11 +886,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue;
if (new) {
- ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset)) {
- error = PTR_ERR(ctrl->ctrl.admin_tagset);
+ error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+ if (error)
goto out_free_async_qe;
- }
ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.fabrics_q)) {
@@ -972,11 +971,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset)) {
- ret = PTR_ERR(ctrl->ctrl.tagset);
+ ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
@@ -1205,6 +1202,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
+ nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
@@ -1894,7 +1892,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
if (ctrl->opts->tos >= 0)
rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
- ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
queue->cm_error);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b95ee85053e3..044da18c06f5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -209,9 +209,11 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
- return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+ if (nvme_is_fabrics(req->req.cmd))
+ return NVME_TCP_ADMIN_CCSZ;
+ return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
@@ -229,7 +231,7 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
rq = blk_mq_rq_from_pdu(req);
return rq_data_dir(rq) == WRITE && req->data_len &&
- req->data_len <= nvme_tcp_inline_data_size(req->queue);
+ req->data_len <= nvme_tcp_inline_data_size(req);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -1658,6 +1660,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ return;
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
@@ -1685,45 +1690,49 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
return ret;
}
-static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ nctrl->admin_tagset = set;
+ return ret;
+}
- return set;
+static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ nctrl->tagset = set;
+ return ret;
}
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
@@ -1899,11 +1908,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
- if (IS_ERR(ctrl->tagset)) {
- ret = PTR_ERR(ctrl->tagset);
+ ret = nvme_tcp_alloc_tag_set(ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(ctrl);
if (ret)
@@ -1968,11 +1975,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return error;
if (new) {
- ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
- if (IS_ERR(ctrl->admin_tagset)) {
- error = PTR_ERR(ctrl->admin_tagset);
+ error = nvme_tcp_alloc_admin_tag_set(ctrl);
+ if (error)
goto out_free_queue;
- }
ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
if (IS_ERR(ctrl->fabrics_q)) {
@@ -2173,6 +2178,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@@ -2371,7 +2377,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
nvme_tcp_set_sg_null(c);
else if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
nvme_tcp_set_sg_host_data(c, req->data_len);
@@ -2406,7 +2412,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
nvme_tcp_init_iter(req, rq_data_dir(rq));
if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
req->pdu_len = req->data_len;
pdu->hdr.type = nvme_tcp_cmd;
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 2a89c5aa0790..1c36fcedea20 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -287,6 +287,34 @@ static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
return ret;
}
+static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -306,6 +334,10 @@ const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvme_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvme_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvme_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvme_trace_fabrics_auth_receive(p, spc);
default:
return nvme_trace_fabrics_common(p, spc);
}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 37c7f4c89f92..6f0eaf6a1528 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -98,7 +98,7 @@ TRACE_EVENT(nvme_complete_rq,
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
- __entry->cid = req->tag;
+ __entry->cid = nvme_req(req)->cmd->common.command_id;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 973561c93888..79fc64035ee3 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -83,3 +83,18 @@ config NVME_TARGET_TCP
devices over TCP.
If unsure, say N.
+
+config NVME_TARGET_AUTH
+ bool "NVMe over Fabrics In-band Authentication support"
+ depends on NVME_TARGET
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+	  This enables support for NVMe over Fabrics In-band Authentication.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 9837e580fa7e..c66820102493 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -13,6 +13,7 @@ nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 397daaf51f1b..fc8a957fad0a 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1017,7 +1017,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
u16 ret;
if (nvme_is_fabrics(cmd))
- return nvmet_parse_fabrics_cmd(req);
+ return nvmet_parse_fabrics_admin_cmd(req);
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
new file mode 100644
index 000000000000..cf690df34775
--- /dev/null
+++ b/drivers/nvme/target/auth.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <asm/unaligned.h>
+
+#include "nvmet.h"
+
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl)
+{
+ unsigned char key_hash;
+ char *dhchap_secret;
+
+	if (sscanf(secret, "DHHC-1:%hhu:%*s", &key_hash) != 1)
+ return -EINVAL;
+ if (key_hash > 3) {
+ pr_warn("Invalid DH-HMAC-CHAP hash id %d\n",
+ key_hash);
+ return -EINVAL;
+ }
+ if (key_hash > 0) {
+ /* Validate selected hash algorithm */
+ const char *hmac = nvme_auth_hmac_name(key_hash);
+
+ if (!crypto_has_shash(hmac, 0, 0)) {
+ pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
+ return -ENOTSUPP;
+ }
+ }
+ dhchap_secret = kstrdup(secret, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ if (set_ctrl) {
+ host->dhchap_ctrl_secret = strim(dhchap_secret);
+ host->dhchap_ctrl_key_hash = key_hash;
+ } else {
+ host->dhchap_secret = strim(dhchap_secret);
+ host->dhchap_key_hash = key_hash;
+ }
+ return 0;
+}
+
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
+{
+ const char *dhgroup_kpp;
+ int ret = 0;
+
+ pr_debug("%s: ctrl %d selecting dhgroup %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+
+ if (ctrl->dh_tfm) {
+ if (ctrl->dh_gid == dhgroup_id) {
+ pr_debug("%s: ctrl %d reuse existing DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return 0;
+ }
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+
+ if (dhgroup_id == NVME_AUTH_DHGROUP_NULL)
+ return 0;
+
+ dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+ if (!dhgroup_kpp) {
+ pr_debug("%s: ctrl %d invalid DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return -EINVAL;
+ }
+ ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0);
+ if (IS_ERR(ctrl->dh_tfm)) {
+ pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n",
+ __func__, ctrl->cntlid, dhgroup_id,
+ PTR_ERR(ctrl->dh_tfm));
+ ret = PTR_ERR(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ } else {
+ ctrl->dh_gid = dhgroup_id;
+ pr_debug("%s: ctrl %d setup DH group %d\n",
+ __func__, ctrl->cntlid, ctrl->dh_gid);
+ ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid);
+ if (ret < 0) {
+ pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
+ __func__, ctrl->cntlid, ret);
+ kfree_sensitive(ctrl->dh_key);
+ return ret;
+ }
+ ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL);
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d failed to allocate public key\n",
+ ctrl->cntlid);
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key,
+ ctrl->dh_keysize);
+ if (ret < 0) {
+ pr_warn("ctrl %d failed to generate public key\n",
+ ctrl->cntlid);
+ kfree(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+ }
+ }
+
+ return ret;
+}
+
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ int ret = 0;
+ struct nvmet_host_link *p;
+ struct nvmet_host *host = NULL;
+ const char *hash_name;
+
+ down_read(&nvmet_config_sem);
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ goto out_unlock;
+
+ if (ctrl->subsys->allow_any_host)
+ goto out_unlock;
+
+ list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
+ pr_debug("check %s\n", nvmet_host_name(p->host));
+ if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
+ continue;
+ host = p->host;
+ break;
+ }
+ if (!host) {
+ pr_debug("host %s not found\n", ctrl->hostnqn);
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
+ if (ret < 0)
+		pr_warn("Failed to setup DH group\n");
+
+ if (!host->dhchap_secret) {
+ pr_debug("No authentication provided\n");
+ goto out_unlock;
+ }
+
+ if (host->dhchap_hash_id == ctrl->shash_id) {
+ pr_debug("Re-use existing hash ID %d\n",
+ ctrl->shash_id);
+ } else {
+ hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ctrl->shash_id = host->dhchap_hash_id;
+ }
+
+ /* Skip the 'DHHC-1:XX:' prefix */
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
+ host->dhchap_key_hash);
+ if (IS_ERR(ctrl->host_key)) {
+ ret = PTR_ERR(ctrl->host_key);
+ ctrl->host_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using hash %s key %*ph\n", __func__,
+ ctrl->host_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
+ (int)ctrl->host_key->len, ctrl->host_key->key);
+
+ nvme_auth_free_key(ctrl->ctrl_key);
+ if (!host->dhchap_ctrl_secret) {
+ ctrl->ctrl_key = NULL;
+ goto out_unlock;
+ }
+
+ ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
+ host->dhchap_ctrl_key_hash);
+	if (IS_ERR(ctrl->ctrl_key)) {
+		ret = PTR_ERR(ctrl->ctrl_key);
+		ctrl->ctrl_key = NULL;
+		goto out_free_hash;
+	}
+ pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
+ ctrl->ctrl_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
+ (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
+
+out_free_hash:
+ if (ret) {
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ ctrl->shash_id = 0;
+ }
+out_unlock:
+ up_read(&nvmet_config_sem);
+
+ return ret;
+}
+
+void nvmet_auth_sq_free(struct nvmet_sq *sq)
+{
+ cancel_delayed_work(&sq->auth_expired_work);
+ kfree(sq->dhchap_c1);
+ sq->dhchap_c1 = NULL;
+ kfree(sq->dhchap_c2);
+ sq->dhchap_c2 = NULL;
+ kfree(sq->dhchap_skey);
+ sq->dhchap_skey = NULL;
+}
+
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
+{
+ ctrl->shash_id = 0;
+
+ if (ctrl->dh_tfm) {
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+
+bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ if (req->sq->ctrl->host_key &&
+ !req->sq->authenticated)
+ return false;
+ return true;
+}
+
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c1, *host_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn);
+ if (IS_ERR(host_response)) {
+ ret = PTR_ERR(host_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, host_response,
+ ctrl->host_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c1,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid);
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c1)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(host_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+	return ret;
+}
+
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, ctrl_response,
+ ctrl->ctrl_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c2,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c2)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(ctrl_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+	return ret;
+}
+
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
+ return -ENOKEY;
+ }
+ if (buf_size != ctrl->dh_keysize) {
+ pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
+ ctrl->cntlid, ctrl->dh_keysize, buf_size);
+ ret = -EINVAL;
+ } else {
+ memcpy(buf, ctrl->dh_key, buf_size);
+ pr_debug("%s: ctrl %d public key %*ph\n", __func__,
+ ctrl->cntlid, (int)buf_size, buf);
+ }
+
+ return ret;
+}
+
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *pkey, int pkey_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret;
+
+ req->sq->dhchap_skey_len = ctrl->dh_keysize;
+ req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
+ if (!req->sq->dhchap_skey)
+ return -ENOMEM;
+ ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
+ pkey, pkey_size,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len);
+ if (ret)
+ pr_debug("failed to compute shared secret, err %d\n", ret);
+ else
+ pr_debug("%s: shared secret %*ph\n", __func__,
+ (int)req->sq->dhchap_skey_len,
+ req->sq->dhchap_skey);
+
+ return ret;
+}
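The host response computed by nvmet_auth_host_hash() above is an HMAC, keyed with the transformed host secret, over the concatenation challenge || le32(SC) || le16(TID) || 0x00 || "HostHost" || hostnqn || 0x00 || subsysnqn. Below is a minimal user-space sketch of the same calculation, using OpenSSL in place of the kernel crypto API; the key, challenge, and NQNs are made-up test values, not anything mandated by this patch:

/*
 * Minimal stand-alone model of the host-response calculation done by
 * nvmet_auth_host_hash().  Build with -lcrypto.  All input values are
 * hypothetical placeholders.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

int main(void)
{
	uint8_t key[32] = { 0x01 };	/* transformed DH-HMAC-CHAP secret */
	uint8_t challenge[32] = { 0x02 };	/* C1 (or augmented C1) */
	uint32_t s1 = 1;	/* sequence number, serialized little endian */
	uint16_t tid = 1;	/* transaction id, serialized little endian */
	const char *hostnqn = "nqn.2014-08.org.nvmexpress:uuid:feed";
	const char *subsysnqn = "nqn.2014-08.org.nvmexpress:uuid:beef";
	uint8_t msg[512], md[EVP_MAX_MD_SIZE];
	unsigned int mdlen;
	size_t off = 0;

	/* challenge || le32(s1) || le16(tid) || 0x00 || "HostHost" ||
	 * hostnqn || 0x00 || subsysnqn, in the same order as the
	 * crypto_shash_update() calls above */
	memcpy(msg + off, challenge, sizeof(challenge));
	off += sizeof(challenge);
	for (int i = 0; i < 4; i++)
		msg[off++] = (s1 >> (8 * i)) & 0xff;
	for (int i = 0; i < 2; i++)
		msg[off++] = (tid >> (8 * i)) & 0xff;
	msg[off++] = 0;
	memcpy(msg + off, "HostHost", 8);
	off += 8;
	memcpy(msg + off, hostnqn, strlen(hostnqn));
	off += strlen(hostnqn);
	msg[off++] = 0;
	memcpy(msg + off, subsysnqn, strlen(subsysnqn));
	off += strlen(subsysnqn);

	HMAC(EVP_sha256(), key, sizeof(key), msg, off, md, &mdlen);
	for (unsigned int i = 0; i < mdlen; i++)
		printf("%02x", md[i]);
	printf("\n");
	return 0;
}

The controller response in nvmet_auth_ctrl_hash() follows the same pattern with C2/SC2, the "Controller" label, and the two NQNs swapped.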
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index ff77c3d2354f..2bcd60758919 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -11,6 +11,11 @@
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
+#ifdef CONFIG_NVME_TARGET_AUTH
+#include <linux/nvme-auth.h>
+#endif
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include "nvmet.h"
@@ -1680,10 +1685,133 @@ static const struct config_item_type nvmet_ports_type = {
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
+#ifdef CONFIG_NVME_TARGET_AUTH
+static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, false);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_key);
+
+static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, true);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
+
+static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+
+ return sprintf(page, "%s\n", hash_name ? hash_name : "none");
+}
+
+static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ u8 hmac_id;
+
+ hmac_id = nvme_auth_hmac_id(page);
+ if (hmac_id == NVME_AUTH_HASH_INVALID)
+ return -EINVAL;
+ if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
+ return -ENOTSUPP;
+ host->dhchap_hash_id = hmac_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
+
+static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
+
+ return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
+}
+
+static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int dhgroup_id;
+
+ dhgroup_id = nvme_auth_dhgroup_id(page);
+ if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
+ return -EINVAL;
+ if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
+ const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+
+ if (!crypto_has_kpp(kpp, 0, 0))
+ return -EINVAL;
+ }
+ host->dhchap_dhgroup_id = dhgroup_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
+
+static struct configfs_attribute *nvmet_host_attrs[] = {
+ &nvmet_host_attr_dhchap_key,
+ &nvmet_host_attr_dhchap_ctrl_key,
+ &nvmet_host_attr_dhchap_hash,
+ &nvmet_host_attr_dhchap_dhgroup,
+ NULL,
+};
+#endif /* CONFIG_NVME_TARGET_AUTH */
+
static void nvmet_host_release(struct config_item *item)
{
struct nvmet_host *host = to_host(item);
+#ifdef CONFIG_NVME_TARGET_AUTH
+	kfree(host->dhchap_secret);
+	kfree(host->dhchap_ctrl_secret);
+#endif
kfree(host);
}
@@ -1693,6 +1821,9 @@ static struct configfs_item_operations nvmet_host_item_ops = {
static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
+#ifdef CONFIG_NVME_TARGET_AUTH
+ .ct_attrs = nvmet_host_attrs,
+#endif
.ct_owner = THIS_MODULE,
};
@@ -1705,6 +1836,11 @@ static struct config_group *nvmet_hosts_make_group(struct config_group *group,
if (!host)
return ERR_PTR(-ENOMEM);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ /* Default to SHA256 */
+ host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
+#endif
+
config_group_init_type_name(&host->group, name, &nvmet_host_type);
return &host->group;
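The four nvmet_host_ attributes added above are driven from user space through configfs. A hypothetical sketch of provisioning them follows; the configfs mount point, the host NQN directory, and the DHHC-1 secret are placeholders, and the "hmac(sha256)"/"ffdhe2048" spellings are assumptions about what nvme_auth_hmac_id() and nvme_auth_dhgroup_id() accept:

/*
 * Hypothetical user-space sketch that provisions the per-host
 * DH-HMAC-CHAP attributes; /sys/kernel/config is the assumed
 * configfs mount point.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

static int write_attr(const char *dir, const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	const char *host =
		"/sys/kernel/config/nvmet/hosts/nqn.2014-08.org.nvmexpress:uuid:feed";

	mkdir(host, 0755);	/* triggers nvmet_hosts_make_group() */
	write_attr(host, "dhchap_hash", "hmac(sha256)");
	write_attr(host, "dhchap_dhgroup", "ffdhe2048");
	/* dummy "DHHC-1:00:<base64>:" secret; only the prefix format is
	 * checked by nvmet_auth_set_key() */
	write_attr(host, "dhchap_key",
		   "DHHC-1:00:ia6zGodOr4SEG0Zzaw398rpY0wqipUWj4jWjUG4HTCc=:");
	return 0;
}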
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index c27660a660d9..a1345790005f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -795,6 +795,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
+ nvmet_auth_sq_free(sq);
if (ctrl) {
/*
@@ -865,8 +866,15 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
+ struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_io_cmd(req);
+
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+
ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
return ret;
@@ -1271,6 +1279,11 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
req->cmd->common.opcode, req->sq->qid);
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
+
+ if (unlikely(!nvmet_check_auth_status(req))) {
+ pr_warn("qid %d not authenticated\n", req->sq->qid);
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ }
return 0;
}
@@ -1467,6 +1480,8 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
+ nvmet_destroy_auth(ctrl);
+
ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 000000000000..ebdf9aa81041
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include "nvmet.h"
+
+static void nvmet_auth_expired_work(struct work_struct *work)
+{
+ struct nvmet_sq *sq = container_of(to_delayed_work(work),
+ struct nvmet_sq, auth_expired_work);
+
+ pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
+ __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ sq->dhchap_tid = -1;
+}
+
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+ u32 result = le32_to_cpu(req->cqe->result.u32);
+
+ /* Initialize in-band authentication */
+ INIT_DELAYED_WORK(&req->sq->auth_expired_work,
+ nvmet_auth_expired_work);
+ req->sq->authenticated = false;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
+ req->cqe->result.u32 = cpu_to_le32(result);
+}
+
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_negotiate_data *data = d;
+ int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
+
+	pr_debug("%s: ctrl %d qid %d: data sc_c %d napd %d authid %d halen %d dhlen %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+ data->auth_protocol[0].dhchap.halen,
+ data->auth_protocol[0].dhchap.dhlen);
+ req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ if (data->sc_c)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+ if (data->napd != 1)
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+ if (data->auth_protocol[0].dhchap.authid !=
+ NVME_AUTH_DHCHAP_AUTH_ID)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+ for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+ u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
+
+ if (!fallback_hash_id &&
+ crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
+ fallback_hash_id = host_hmac_id;
+ if (ctrl->shash_id != host_hmac_id)
+ continue;
+ hash_id = ctrl->shash_id;
+ break;
+ }
+ if (hash_id == 0) {
+ if (fallback_hash_id == 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_hmac_name(fallback_hash_id));
+ ctrl->shash_id = fallback_hash_id;
+ }
+
+ dhgid = -1;
+ fallback_dhgid = -1;
+ for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
+ int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
+
+		if (tmp_dhgid == ctrl->dh_gid) {
+ dhgid = tmp_dhgid;
+ break;
+ }
+ if (fallback_dhgid < 0) {
+ const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
+
+ if (crypto_has_kpp(kpp, 0, 0))
+ fallback_dhgid = tmp_dhgid;
+ }
+ }
+ if (dhgid < 0) {
+ if (fallback_dhgid < 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(fallback_dhgid));
+ ctrl->dh_gid = fallback_dhgid;
+ }
+ pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+ return 0;
+}
+
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_reply_data *data = d;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ u8 *response;
+
+ pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->hl, data->cvalid, dhvlen);
+
+ if (dhvlen) {
+ if (!ctrl->dh_tfm)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
+ dhvlen) < 0)
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+
+ response = kmalloc(data->hl, GFP_KERNEL);
+ if (!response)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ if (!ctrl->host_key) {
+ pr_warn("ctrl %d qid %d no host key\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+ pr_debug("ctrl %d qid %d host hash failed\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+
+ if (memcmp(data->rval, response, data->hl)) {
+ pr_info("ctrl %d qid %d host response mismatch\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ kfree(response);
+ pr_debug("%s: ctrl %d qid %d host authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ if (data->cvalid) {
+ req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
+ GFP_KERNEL);
+ if (!req->sq->dhchap_c2)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
+ __func__, ctrl->cntlid, req->sq->qid, data->hl,
+ req->sq->dhchap_c2);
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else {
+ req->sq->authenticated = true;
+ req->sq->dhchap_c2 = NULL;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ return data->rescode_exp;
+}
+
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_success2_data *data;
+ void *d;
+ u32 tl;
+ u16 status = 0;
+
+ if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp1);
+ goto done;
+ }
+ tl = le32_to_cpu(req->cmd->auth_send.tl);
+ if (!tl) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, tl);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, tl)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
+ return;
+ }
+
+ d = kmalloc(tl, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, tl);
+ if (status) {
+ kfree(d);
+ goto done;
+ }
+
+ data = d;
+ pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
+ req->sq->dhchap_step);
+ if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
+ goto done_failure1;
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
+ if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
+ /* Restart negotiation */
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ if (!req->sq->qid) {
+ if (nvmet_setup_auth(ctrl) < 0) {
+ status = NVME_SC_INTERNAL;
+					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
+					       ctrl->cntlid);
+ goto done_failure1;
+ }
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ } else if (data->auth_id != req->sq->dhchap_step)
+ goto done_failure1;
+ /* Validate negotiation parameters */
+ status = nvmet_auth_negotiate(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ }
+ if (data->auth_id != req->sq->dhchap_step) {
+ pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->auth_id, req->sq->dhchap_step);
+ goto done_failure1;
+ }
+ if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
+ pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ le16_to_cpu(data->t_id),
+ req->sq->dhchap_tid);
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ goto done_kfree;
+ }
+
+ switch (data->auth_id) {
+ case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
+ status = nvmet_auth_reply(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ req->sq->authenticated = true;
+ pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ goto done_kfree;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
+ status = nvmet_auth_failure2(req, d);
+ if (status) {
+ pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ status = 0;
+ }
+ goto done_kfree;
+ default:
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ req->sq->authenticated = false;
+ goto done_kfree;
+ }
+done_failure1:
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+
+done_kfree:
+ kfree(d);
+done:
+ pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status, req->sq->dhchap_step);
+ if (status)
+ pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ status, req->error_loc);
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+
+ mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ auth_expire_secs * HZ);
+ return;
+ }
+ /* Final states, clear up variables */
+ nvmet_auth_sq_free(req->sq);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ nvmet_ctrl_fatal_error(ctrl);
+}
+
+static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+	int data_size = sizeof(*data) + hash_len;
+
+ if (ctrl->dh_tfm)
+ data_size += ctrl->dh_keysize;
+ if (al < data_size) {
+ pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
+ al, data_size);
+ return -EINVAL;
+ }
+ memset(data, 0, data_size);
+ req->sq->dhchap_s1 = nvme_auth_get_seqnum();
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hashid = ctrl->shash_id;
+ data->hl = hash_len;
+ data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
+ req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
+ if (!req->sq->dhchap_c1)
+ return -ENOMEM;
+ get_random_bytes(req->sq->dhchap_c1, data->hl);
+ memcpy(data->cval, req->sq->dhchap_c1, data->hl);
+ if (ctrl->dh_tfm) {
+ data->dhgid = ctrl->dh_gid;
+ data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
+ ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
+ ctrl->dh_keysize);
+ }
+ pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
+ __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
+ return ret;
+}
+
+static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_success1_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+
+ WARN_ON(al < sizeof(*data));
+ memset(data, 0, sizeof(*data));
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hl = hash_len;
+ if (req->sq->dhchap_c2) {
+ if (!ctrl->ctrl_key) {
+ pr_warn("ctrl %d qid %d no ctrl key\n",
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ data->rvalid = 1;
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ }
+ return 0;
+}
+
+static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ WARN_ON(al < sizeof(*data));
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = req->sq->dhchap_status;
+}
+
+void nvmet_execute_auth_receive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ void *d;
+ u32 al;
+ u16 status = 0;
+
+ if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp1);
+ goto done;
+ }
+ al = le32_to_cpu(req->cmd->auth_receive.al);
+ if (!al) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, al);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, al)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
+ return;
+ }
+
+ d = kmalloc(al, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+ pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ switch (req->sq->dhchap_step) {
+ case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
+ if (nvmet_auth_challenge(req, d, al) < 0) {
+			pr_warn("ctrl %d qid %d: challenge error\n",
+				ctrl->cntlid, req->sq->qid);
+ status = NVME_SC_INTERNAL;
+ break;
+ }
+ if (status) {
+ req->sq->dhchap_status = status;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: challenge status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ status = 0;
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
+ status = nvmet_auth_success1(req, d, al);
+ if (status) {
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: success1 status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d failure1 (%x)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
+ break;
+ default:
+ pr_warn("ctrl %d qid %d unhandled step (%d)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ nvmet_auth_failure1(req, d, al);
+ status = 0;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, d, al);
+ kfree(d);
+done:
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ nvmet_auth_sq_free(req->sq);
+ else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ nvmet_auth_sq_free(req->sq);
+ nvmet_ctrl_fatal_error(ctrl);
+ }
+}
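The idlist[i + 30] arithmetic in nvmet_auth_negotiate() above leans on the layout of the DH-HMAC-CHAP protocol descriptor, in which the hash identifiers occupy the first half of the 60-byte idlist and the DH group identifiers the second half. A sketch of that layout, reproduced from memory of include/linux/nvme.h and therefore an assumption to be checked against the authoritative header:

/*
 * Assumed wire layout behind the idlist[i + 30] indexing in
 * nvmet_auth_negotiate(); see include/linux/nvme.h for the
 * authoritative definition.
 */
#include <linux/types.h>

struct nvmf_auth_dhchap_protocol_descriptor {
	__u8	authid;		/* NVME_AUTH_DHCHAP_AUTH_ID */
	__u8	rsvd;
	__u8	halen;		/* number of valid hash ids */
	__u8	dhlen;		/* number of valid DH group ids */
	__u8	idlist[60];	/* [0..29] hash ids, [30..59] DH group ids */
};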
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 70fb587e9413..f91a56180d3d 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -82,7 +82,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -93,6 +93,37 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
case nvme_fabrics_type_property_get:
req->execute = nvmet_execute_prop_get;
break;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
+ default:
+ pr_debug("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
default:
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
@@ -173,6 +204,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
@@ -215,18 +247,32 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
uuid_copy(&ctrl->hostid, &d->hostid);
+ ret = nvmet_setup_auth(ctrl);
+ if (ret < 0) {
+ pr_err("Failed to setup authentication, error %d\n", ret);
+ nvmet_ctrl_put(ctrl);
+ if (ret == -EPERM)
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ else
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
status = nvmet_install_queue(ctrl, req);
if (status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
- ctrl->pi_support ? " T10-PI is enabled" : "");
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
complete:
@@ -286,6 +332,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0f5c77e22a0a..9750a7fca268 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -424,9 +424,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ nvme_cancel_tagset(&ctrl->ctrl);
nvme_loop_destroy_io_queues(ctrl);
}
@@ -434,9 +432,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 2b3e5719f24e..6ffeeb0a1c49 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -108,6 +108,19 @@ struct nvmet_sq {
u16 size;
u32 sqhd;
bool sqhd_disabled;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct delayed_work auth_expired_work;
+ bool authenticated;
+ u16 dhchap_tid;
+ u16 dhchap_status;
+ int dhchap_step;
+ u8 *dhchap_c1;
+ u8 *dhchap_c2;
+ u32 dhchap_s1;
+ u32 dhchap_s2;
+ u8 *dhchap_skey;
+ int dhchap_skey_len;
+#endif
struct completion free_done;
struct completion confirm_done;
};
@@ -209,6 +222,15 @@ struct nvmet_ctrl {
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
bool pi_support;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u8 shash_id;
+ struct crypto_kpp *dh_tfm;
+ u8 dh_gid;
+ u8 *dh_key;
+ size_t dh_keysize;
+#endif
};
struct nvmet_subsys {
@@ -271,6 +293,12 @@ static inline struct nvmet_subsys *namespaces_to_subsys(
struct nvmet_host {
struct config_group group;
+ u8 *dhchap_secret;
+ u8 *dhchap_ctrl_secret;
+ u8 dhchap_key_hash;
+ u8 dhchap_ctrl_key_hash;
+ u8 dhchap_hash_id;
+ u8 dhchap_dhgroup_id;
};
static inline struct nvmet_host *to_host(struct config_item *item)
@@ -420,7 +448,8 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
@@ -668,4 +697,48 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_NVME_TARGET_AUTH
+void nvmet_execute_auth_send(struct nvmet_req *req);
+void nvmet_execute_auth_receive(struct nvmet_req *req);
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl);
+int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_free(struct nvmet_sq *sq);
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
+bool nvmet_check_auth_status(struct nvmet_req *req);
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->host_key != NULL;
+}
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+#else
+static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
+		struct nvmet_req *req) {}
+static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
+static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
+static inline bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ return true;
+}
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return false;
+}
+static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+#endif
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 09fdcac87d17..4597bca43a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -415,7 +415,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- if (!ib_uses_virt_dma(ndev->device))
+ if (ib_dma_pci_p2p_dma_supported(ndev->device))
r->req.p2p_client = &ndev->device->dev;
r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 0a9542599ad1..dc3b4dc8fe08 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1839,7 +1839,8 @@ static int __init nvmet_tcp_init(void)
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 94f017d808c4..96f0a12e507c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -1045,26 +1045,29 @@ phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
*
* It returns true if "dma-coherent" property was found
* for this device in the DT, or if DMA is coherent by
- * default for OF devices on the current platform.
+ * default for OF devices on the current platform and no
+ * "dma-noncoherent" property was found for this device.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node;
-
- if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
- return true;
+ bool is_coherent = IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT);
node = of_node_get(np);
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
- of_node_put(node);
- return true;
+ is_coherent = true;
+ break;
+ }
+ if (of_property_read_bool(node, "dma-noncoherent")) {
+ is_coherent = false;
+ break;
}
node = of_get_next_dma_parent(node);
}
of_node_put(node);
- return false;
+ return is_coherent;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
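The rework above changes of_dma_is_coherent() from a short-circuit on the platform default to a walk in which the nearest explicit property wins, and the CONFIG_OF_DMA_DEFAULT_COHERENT default applies only when neither "dma-coherent" nor "dma-noncoherent" is found on the way up the DMA-parent chain. A stand-alone C model of that precedence, with purely illustrative node and property types:

/*
 * Stand-alone model of the new of_dma_is_coherent() precedence: the
 * nearest explicit property wins; the platform default applies only
 * when neither property is present.  Types are illustrative, not the
 * kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *dma_parent;
	bool has_dma_coherent;
	bool has_dma_noncoherent;
};

static bool dma_is_coherent(struct node *np, bool platform_default)
{
	bool is_coherent = platform_default;

	for (; np; np = np->dma_parent) {
		if (np->has_dma_coherent) {
			is_coherent = true;
			break;
		}
		if (np->has_dma_noncoherent) {
			is_coherent = false;
			break;
		}
	}
	return is_coherent;
}

int main(void)
{
	struct node bus = { .has_dma_coherent = true };
	struct node dev = { .dma_parent = &bus, .has_dma_noncoherent = true };

	/* device overrides the coherent bus: prints 0 */
	printf("%d\n", dma_is_coherent(&dev, true));
	return 0;
}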
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a19cd0c73644..7fa960bd3df1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2079,7 +2079,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
*
* @cpu: cpu number(logical index) for which the last cache level is needed
*
- * Return: The the level at which the last cache is present. It is exactly
+ * Return: The level at which the last cache is present. It is exactly
* same as the total number of cache levels for the given logical cpu.
*/
int of_find_last_cache_level(unsigned int cpu)
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 874f031442dc..75b6cbffa755 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -81,8 +81,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
* restricted-dma-pool region is allowed.
*/
if (of_device_is_compatible(node, "restricted-dma-pool") &&
- of_device_is_available(node))
+ of_device_is_available(node)) {
+ of_node_put(node);
break;
+ }
+ of_node_put(node);
}
/*
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a8f5b6532165..7bc92923104c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -246,7 +246,7 @@ static int populate_node(const void *blob,
}
*pnp = np;
- return true;
+ return 0;
}
static void reverse_nodes(struct device_node *parent)
@@ -477,8 +477,8 @@ void *initial_boot_params __ro_after_init;
static u32 of_fdt_crc32;
-static int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
- phys_addr_t size, bool nomap)
+static int __init early_init_dt_reserve_memory(phys_addr_t base,
+ phys_addr_t size, bool nomap)
{
if (nomap) {
/*
@@ -525,15 +525,15 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory_arch(base, size, nomap) == 0) {
+ early_init_dt_reserve_memory(base, size, nomap) == 0) {
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
if (!nomap)
- kmemleak_alloc_phys(base, size, 0, 0);
+ kmemleak_alloc_phys(base, size, 0);
}
else
- pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
- uname, &base, (unsigned long)(size / SZ_1M));
+ pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
len -= t_len;
if (first) {
@@ -644,7 +644,7 @@ void __init early_init_fdt_scan_reserved_mem(void)
fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
if (!size)
break;
- early_init_dt_reserve_memory_arch(base, size, false);
+ memblock_reserve(base, size);
}
fdt_scan_reserved_mem();
@@ -661,9 +661,8 @@ void __init early_init_fdt_reserve_self(void)
return;
/* Reserve the dtb region */
- early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
- fdt_totalsize(initial_boot_params),
- false);
+ memblock_reserve(__pa(initial_boot_params),
+ fdt_totalsize(initial_boot_params));
}
/**
@@ -1025,6 +1024,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
int l;
const struct earlycon_id *match;
const void *fdt = initial_boot_params;
+ int ret;
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0)
@@ -1057,7 +1057,8 @@ int __init early_init_dt_scan_chosen_stdout(void)
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
- if (of_setup_earlycon(match, offset, options) == 0)
+ ret = of_setup_earlycon(match, offset, options);
+ if (!ret || ret == -EALREADY)
return 0;
}
return -ENODEV;
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index f2e58ddfaed2..e6c01db393f9 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -128,6 +128,7 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
{
int ret, len;
unsigned long tmp_addr;
+ unsigned long start_pfn, end_pfn;
size_t tmp_size;
const void *prop;
@@ -139,6 +140,22 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
if (ret)
return ret;
+ /* Do some basic sanity checking on the returned size of the ima-kexec buffer */
+ if (!tmp_size)
+ return -ENOENT;
+
+ /*
+ * Calculate the PFNs for the buffer and ensure
+ * they are within addressable memory.
+ */
+ start_pfn = PHYS_PFN(tmp_addr);
+ end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1);
+ if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {
+ pr_warn("IMA buffer at 0x%lx, size = 0x%zx beyond memory\n",
+ tmp_addr, tmp_size);
+ return -EINVAL;
+ }
+
*addr = __va(tmp_addr);
*size = tmp_size;
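[ The added check reduces to one rule: never __va() a physical range unless both of its ends are backed by RAM. A minimal sketch of that predicate using the same helpers as the patch (the size must already be validated as non-zero, as above):

	static bool phys_range_is_ram(phys_addr_t addr, size_t size)
	{
		/* check the first and the last page frame of the range */
		return page_is_ram(PHYS_PFN(addr)) &&
		       page_is_ram(PHYS_PFN(addr + size - 1));
	}
]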
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 75caa6f5d36f..65f3b02a0e4e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -156,7 +156,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
}
if (base == 0) {
- pr_info("failed to allocate memory for node '%s'\n", uname);
+ pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
+ uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 4044ddcb02c6..bd8ff4df723d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -903,12 +903,6 @@ static int of_overlay_apply(struct overlay_changeset *ovcs)
{
int ret = 0, ret_revert, ret_tmp;
- if (devicetree_corrupt()) {
- pr_err("devicetree state suspect, refuse to apply overlay\n");
- ret = -EBUSY;
- goto out;
- }
-
ret = of_resolve_phandles(ovcs->overlay_root);
if (ret)
goto out;
@@ -983,6 +977,11 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
*ret_ovcs_id = 0;
+ if (devicetree_corrupt()) {
+ pr_err("devicetree state suspect, refuse to apply overlay\n");
+ return -EBUSY;
+ }
+
if (overlay_fdt_size < sizeof(struct fdt_header) ||
fdt_check_header(overlay_fdt)) {
pr_err("Invalid overlay_fdt header\n");
@@ -1044,20 +1043,15 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
* goto err_free_ovcs. Instead, the caller of of_overlay_fdt_apply()
* can call of_overlay_remove();
*/
-
- mutex_unlock(&of_mutex);
- of_overlay_mutex_unlock();
-
*ret_ovcs_id = ovcs->id;
-
- return ret;
+ goto out_unlock;
err_free_ovcs:
free_overlay_changeset(ovcs);
+out_unlock:
mutex_unlock(&of_mutex);
of_overlay_mutex_unlock();
-
return ret;
}
EXPORT_SYMBOL_GPL(of_overlay_fdt_apply);
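[ The restructuring above is the usual single-exit pattern for paired locks: the success path and the error path now funnel through one unlock site. A condensed sketch of the resulting control flow (names abridged, apply_changeset() is a hypothetical stand-in):

	mutex_lock(&of_mutex);

	ret = apply_changeset();
	if (ret)
		goto err_free_ovcs;

	*ret_ovcs_id = ovcs->id;
	goto out_unlock;		/* success shares the unlock */

err_free_ovcs:
	free_overlay_changeset(ovcs);
out_unlock:
	mutex_unlock(&of_mutex);
	return ret;
]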
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 7f6bba18c515..eafa8ffefbd0 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1602,7 +1602,7 @@ static int unittest_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, devptr);
- devptr->chip.of_node = pdev->dev.of_node;
+ devptr->chip.fwnode = dev_fwnode(&pdev->dev);
devptr->chip.label = "of-unittest-gpio";
devptr->chip.base = -1; /* dynamic allocation */
devptr->chip.ngpio = 5;
@@ -1611,7 +1611,7 @@ static int unittest_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(&devptr->chip, NULL);
unittest(!ret,
- "gpiochip_add_data() for node @%pOF failed, ret = %d\n", devptr->chip.of_node, ret);
+ "gpiochip_add_data() for node @%pfw failed, ret = %d\n", devptr->chip.fwnode, ret);
if (!ret)
unittest_gpio_probe_pass_count++;
@@ -1620,20 +1620,19 @@ static int unittest_gpio_probe(struct platform_device *pdev)
static int unittest_gpio_remove(struct platform_device *pdev)
{
- struct unittest_gpio_dev *gdev = platform_get_drvdata(pdev);
+ struct unittest_gpio_dev *devptr = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
- dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
+ dev_dbg(dev, "%s for node @%pfw\n", __func__, devptr->chip.fwnode);
- if (!gdev)
+ if (!devptr)
return -EINVAL;
- if (gdev->chip.base != -1)
- gpiochip_remove(&gdev->chip);
+ if (devptr->chip.base != -1)
+ gpiochip_remove(&devptr->chip);
platform_set_drvdata(pdev, NULL);
- kfree(gdev);
+ kfree(devptr);
return 0;
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 84063eaebb91..77d1ba3a4154 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -13,11 +13,12 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
#include "opp.h"
@@ -36,6 +37,9 @@ DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;
+/* OPP ID allocator */
+static DEFINE_XARRAY_ALLOC1(opp_configs);
+
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
struct opp_device *opp_dev;
@@ -93,6 +97,18 @@ struct opp_table *_find_opp_table(struct device *dev)
return opp_table;
}
+/*
+ * Returns true if multiple clocks are not present, else returns false with a WARN.
+ *
+ * We don't force clk_count == 1 here as there are users who don't have a clock
+ * representation in the OPP table and manage the clock configuration themselves
+ * in a platform-specific way.
+ */
+static bool assert_single_clk(struct opp_table *opp_table)
+{
+ return !WARN_ON(opp_table->clk_count > 1);
+}
+
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
@@ -114,6 +130,31 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
+ * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
+ * @opp: opp for which the supply information has to be returned
+ * @supplies: Placeholder for copying the supply information.
+ *
+ * Return: negative error number on failure, 0 on success after
+ * setting @supplies.
+ *
+ * This can be used for devices with any number of power supplies. The caller
+ * must ensure that the @supplies array contains space for each regulator.
+ */
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
+ struct dev_pm_opp_supply *supplies)
+{
+ if (IS_ERR_OR_NULL(opp) || !supplies) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(supplies, opp->supplies,
+ sizeof(*supplies) * opp->opp_table->regulator_count);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
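[ A hedged usage sketch for the new export: the caller has to know the regulator count up front, since the API copies one entry per registered regulator. FOO_NUM_SUPPLIES is hypothetical, taken here from the consumer's DT binding:

	#define FOO_NUM_SUPPLIES 2	/* hypothetical, from the binding */

	static int foo_read_supplies(struct dev_pm_opp *opp)
	{
		struct dev_pm_opp_supply supplies[FOO_NUM_SUPPLIES];
		int ret;

		ret = dev_pm_opp_get_supplies(opp, supplies);
		if (ret)
			return ret;

		pr_info("supply 0: %lu uV\n", supplies[0].u_volt);
		return 0;
	}
]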
+
+/**
* dev_pm_opp_get_power() - Gets the power corresponding to an opp
* @opp: opp for which power has to be returned for
*
@@ -152,7 +193,10 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
- return opp->rate;
+ if (!assert_single_clk(opp->opp_table))
+ return 0;
+
+ return opp->rates[0];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
@@ -398,6 +442,154 @@ int dev_pm_opp_get_opp_count(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
+/* Helpers to read keys */
+static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
+{
+ return opp->rates[0];
+}
+
+static unsigned long _read_level(struct dev_pm_opp *opp, int index)
+{
+ return opp->level;
+}
+
+static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
+{
+ return opp->bandwidth[index].peak;
+}
+
+/* Generic comparison helpers */
+static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key == key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key >= key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key > key)
+ return true;
+
+ *opp = temp_opp;
+ return false;
+}
+
+/* Generic key finding helpers */
+static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ /* Assert that the requirement is met */
+ if (assert && !assert(opp_table))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available == available) {
+ if (compare(&opp, temp_opp, read(temp_opp, index), *key))
+ break;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp)) {
+ *key = read(opp, index);
+ dev_pm_opp_get(opp);
+ }
+
+ mutex_unlock(&opp_table->lock);
+
+ return opp;
+}
+
+static struct dev_pm_opp *
+_find_key(struct device *dev, unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return ERR_CAST(opp_table);
+ }
+
+ opp = _opp_table_find_key(opp_table, key, index, available, read,
+ compare, assert);
+
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+
+static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ unsigned long key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ /*
+ * The value of key will be updated here, but will be ignored as the
+ * caller doesn't need it.
+ */
+ return _find_key(dev, &key, index, available, read, _compare_exact,
+ assert);
+}
+
+static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _opp_table_find_key(opp_table, key, index, available, read,
+ _compare_ceil, assert);
+}
+
+static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_ceil,
+ assert);
+}
+
+static struct dev_pm_opp *_find_key_floor(struct device *dev,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_floor,
+ assert);
+}
+
/**
* dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
@@ -422,61 +614,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* use.
*/
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq,
- bool available)
+ unsigned long freq, bool available)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available == available &&
- temp_opp->rate == freq) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, freq, 0, available, _read_freq,
+ assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
-
- return opp;
+ return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
+ assert_single_clk);
}
/**
@@ -500,23 +649,7 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *opp;
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- opp = _find_freq_ceil(opp_table, freq);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
@@ -541,98 +674,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- /* go to the next node, before choosing prev */
- if (temp_opp->rate > *freq)
- break;
- else
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *freq = opp->rate;
-
- return opp;
+ return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
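[ The exact/ceil/floor trio composes into the long-standing OPP iteration idiom, which keeps working unchanged on top of the new _find_key() plumbing; a minimal sketch with a hypothetical helper name:

	/* walk all available OPPs in ascending frequency order */
	static void foo_dump_opps(struct device *dev)
	{
		unsigned long freq = 0;
		struct dev_pm_opp *opp;

		while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
			dev_info(dev, "OPP: %lu Hz\n", freq);
			dev_pm_opp_put(opp);	/* drop the ref the lookup took */
			freq++;			/* step past the OPP just found */
		}
	}
]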
/**
- * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
- * target voltage.
- * @dev: Device for which we do this operation.
- * @u_volt: Target voltage.
- *
- * Search for OPP with highest (ceil) frequency and has voltage <= u_volt.
- *
- * Return: matching *opp, else returns ERR_PTR in case of error which should be
- * handled using IS_ERR.
- *
- * Error return values can be:
- * EINVAL: bad parameters
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !u_volt) {
- dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
- u_volt);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- if (temp_opp->supplies[0].u_volt > u_volt)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
-
-/**
* dev_pm_opp_find_level_exact() - search for an exact level
* @dev: device for which we do this operation
* @level: level to search for
@@ -650,33 +696,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->level == level) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
@@ -698,33 +718,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->level >= *level) {
- opp = temp_opp;
- *level = opp->level;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *level;
+ struct dev_pm_opp *opp;
+
+ opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
+ *level = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
@@ -732,7 +730,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
/**
* dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
* Search for the matching ceil *available* OPP from a starting bandwidth
@@ -748,42 +746,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
* The callers are required to call dev_pm_opp_put() for the returned OPP after
* use.
*/
-struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
- unsigned int *bw, int index)
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
+ int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- if (temp_opp->bandwidth[index].peak >= *bw) {
- opp = temp_opp;
- *bw = opp->bandwidth[index].peak;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+
+ opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
@@ -791,7 +761,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
/**
* dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
* Search for the matching floor *available* OPP from a starting bandwidth
@@ -810,41 +780,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned int *bw, int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- /* go to the next node, before choosing prev */
- if (temp_opp->bandwidth[index].peak > *bw)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *bw = opp->bandwidth[index].peak;
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+
+ opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
@@ -874,80 +814,97 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return ret;
}
-static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long freq)
+static int
+_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down)
{
+ unsigned long *target = data;
+ unsigned long freq;
int ret;
- /* We may reach here for devices which don't change frequency */
- if (IS_ERR(clk))
- return 0;
+ /* Either a target frequency or an OPP must be provided */
+ if (target) {
+ freq = *target;
+ } else if (opp) {
+ freq = opp->rates[0];
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
- ret = clk_set_rate(clk, freq);
+ ret = clk_set_rate(opp_table->clk, freq);
if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret);
+ } else {
+ opp_table->rate_clk_single = freq;
}
return ret;
}
-static int _generic_set_opp_regulator(struct opp_table *opp_table,
- struct device *dev,
- struct dev_pm_opp *opp,
- unsigned long freq,
- int scaling_down)
+/*
+ * Simple implementation for configuring multiple clocks. Configure clocks in the
+ * order they appear in the array while scaling up, and in reverse while scaling down.
+ */
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
- struct regulator *reg = opp_table->regulators[0];
- struct dev_pm_opp *old_opp = opp_table->current_opp;
+ int ret, i;
+
+ if (scaling_down) {
+ for (i = opp_table->clk_count - 1; i >= 0; i--) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ for (i = 0; i < opp_table->clk_count; i++) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
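[ dev_pm_opp_config_clks_simple() is meant to be handed to the OPP core rather than called directly. A hedged sketch of the expected hookup through struct dev_pm_opp_config as introduced by this series; field and function names are assumed from the surrounding work, and the clock names are hypothetical:

	static const char * const foo_clk_names[] = { "core", "mem", NULL };

	static int foo_opp_init(struct device *dev)
	{
		struct dev_pm_opp_config config = {
			.clk_names = foo_clk_names,
			.config_clks = dev_pm_opp_config_clks_simple,
		};

		return devm_pm_opp_set_config(dev, &config);
	}
]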
+
+static int _opp_config_regulator_single(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
+{
+ struct regulator *reg = regulators[0];
int ret;
/* This function only supports single regulator per device */
- if (WARN_ON(opp_table->regulator_count > 1)) {
+ if (WARN_ON(count > 1)) {
dev_err(dev, "multiple regulators are not supported\n");
return -EINVAL;
}
- /* Scaling up? Scale voltage before frequency */
- if (!scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ ret = _set_opp_voltage(dev, reg, new_opp->supplies);
if (ret)
- goto restore_voltage;
-
- /* Scaling down? Scale voltage after frequency */
- if (scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_freq;
- }
+ return ret;
/*
* Enable the regulator after setting its voltages, otherwise it breaks
* some boot-enabled regulators.
*/
- if (unlikely(!opp_table->enabled)) {
+ if (unlikely(!new_opp->opp_table->enabled)) {
ret = regulator_enable(reg);
if (ret < 0)
dev_warn(dev, "Failed to enable regulator: %d", ret);
}
return 0;
-
-restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_opp->rate);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- _set_opp_voltage(dev, reg, old_opp->supplies);
-
- return ret;
}
static int _set_opp_bw(const struct opp_table *opp_table,
@@ -978,36 +935,6 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
}
-static int _set_opp_custom(const struct opp_table *opp_table,
- struct device *dev, struct dev_pm_opp *opp,
- unsigned long freq)
-{
- struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
- struct dev_pm_opp *old_opp = opp_table->current_opp;
- int size;
-
- /*
- * We support this only if dev_pm_opp_set_regulators() was called
- * earlier.
- */
- if (opp_table->sod_supplies) {
- size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
- memcpy(data->old_opp.supplies, old_opp->supplies, size);
- memcpy(data->new_opp.supplies, opp->supplies, size);
- data->regulator_count = opp_table->regulator_count;
- } else {
- data->regulator_count = 0;
- }
-
- data->regulators = opp_table->regulators;
- data->clk = opp_table->clk;
- data->dev = dev;
- data->old_opp.rate = old_opp->rate;
- data->new_opp.rate = freq;
-
- return opp_table->set_opp(data);
-}
-
static int _set_required_opp(struct device *dev, struct device *pd_dev,
struct dev_pm_opp *opp, int i)
{
@@ -1019,7 +946,7 @@ static int _set_required_opp(struct device *dev, struct device *pd_dev,
ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
if (ret) {
- dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
+ dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
dev_name(pd_dev), pstate, ret);
}
@@ -1138,7 +1065,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
}
static int _set_opp(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp, unsigned long freq)
+ struct dev_pm_opp *opp, void *clk_data, bool forced)
{
struct dev_pm_opp *old_opp;
int scaling_down, ret;
@@ -1153,18 +1080,17 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
old_opp = opp_table->current_opp;
/* Return early if nothing to do */
- if (old_opp == opp && opp_table->current_rate == freq &&
- opp_table->enabled) {
+ if (!forced && old_opp == opp && opp_table->enabled) {
dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
return 0;
}
dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
- __func__, opp_table->current_rate, freq, old_opp->level,
+ __func__, old_opp->rates[0], opp->rates[0], old_opp->level,
opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
opp->bandwidth ? opp->bandwidth[0].peak : 0);
- scaling_down = _opp_compare_key(old_opp, opp);
+ scaling_down = _opp_compare_key(opp_table, old_opp, opp);
if (scaling_down == -1)
scaling_down = 0;
@@ -1181,23 +1107,38 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
dev_err(dev, "Failed to set bw: %d\n", ret);
return ret;
}
- }
- if (opp_table->set_opp) {
- ret = _set_opp_custom(opp_table, dev, opp, freq);
- } else if (opp_table->regulators) {
- ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
- scaling_down);
- } else {
- /* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
}
- if (ret)
- return ret;
+ if (opp_table->config_clks) {
+ ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
+ if (ret)
+ return ret;
+ }
/* Scaling down? Configure required OPPs after frequency */
if (scaling_down) {
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = _set_opp_bw(opp_table, opp, dev);
if (ret) {
dev_err(dev, "Failed to set bw: %d\n", ret);
@@ -1217,7 +1158,6 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
/* Make sure current_opp doesn't get freed */
dev_pm_opp_get(opp);
opp_table->current_opp = opp;
- opp_table->current_rate = freq;
return ret;
}
@@ -1238,6 +1178,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
struct opp_table *opp_table;
unsigned long freq = 0, temp_freq;
struct dev_pm_opp *opp = NULL;
+ bool forced = false;
int ret;
opp_table = _find_opp_table(dev);
@@ -1255,7 +1196,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* equivalent to a clk_set_rate()
*/
if (!_get_opp_count(opp_table)) {
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
+ ret = opp_table->config_clks(dev, opp_table, NULL,
+ &target_freq, false);
goto put_opp_table;
}
@@ -1276,12 +1218,22 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
__func__, freq, ret);
goto put_opp_table;
}
+
+ /*
+ * An OPP entry specifies the highest frequency at which other
+ * properties of the OPP entry apply. Even if the new OPP is the
+ * same as the old one, we may still reach here for a different
+ * value of the frequency. In such a case, do not abort but
+ * configure the hardware to the desired frequency forcefully.
+ */
+ forced = opp_table->rate_clk_single != target_freq;
}
- ret = _set_opp(dev, opp_table, opp, freq);
+ ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
if (target_freq)
dev_pm_opp_put(opp);
+
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
return ret;
@@ -1309,7 +1261,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
return PTR_ERR(opp_table);
}
- ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
+ ret = _set_opp(dev, opp_table, opp, NULL, false);
dev_pm_opp_put_opp_table(opp_table);
return ret;
@@ -1366,6 +1318,8 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
INIT_LIST_HEAD(&opp_table->dev_list);
INIT_LIST_HEAD(&opp_table->lazy);
+ opp_table->clk = ERR_PTR(-ENODEV);
+
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
@@ -1412,20 +1366,38 @@ static struct opp_table *_update_opp_table_clk(struct device *dev,
int ret;
/*
- * Return early if we don't need to get clk or we have already tried it
+ * Return early if we don't need to get clk or we have already done it
* earlier.
*/
- if (!getclk || IS_ERR(opp_table) || opp_table->clk)
+ if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
+ opp_table->clks)
return opp_table;
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
ret = PTR_ERR_OR_ZERO(opp_table->clk);
- if (!ret)
+ if (!ret) {
+ opp_table->config_clks = _opp_config_clk_single;
+ opp_table->clk_count = 1;
return opp_table;
+ }
if (ret == -ENOENT) {
+ /*
+ * There are a few platforms which don't want the OPP core to
+ * manage the device's clock settings. In such cases the platform
+ * neither provides the clks explicitly to us, nor does the DT
+ * contain a valid clk entry. The OPP nodes in DT may still
+ * contain an "opp-hz" property though, which we need to parse and
+ * allow the platform to find an OPP based on freq later on.
+ *
+ * This is a simple solution to take care of such corner cases,
+ * i.e. make the clk_count 1, which lets us allocate space for
+ * frequency in opp->rates and also parse the entries in DT.
+ */
+ opp_table->clk_count = 1;
+
dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
return opp_table;
}
@@ -1528,7 +1500,7 @@ static void _opp_table_kref_release(struct kref *kref)
_of_clear_opp_table(opp_table);
- /* Release clk */
+ /* Release automatically acquired single clk */
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
@@ -1581,7 +1553,7 @@ static void _opp_kref_release(struct kref *kref)
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
- _of_opp_free_required_opps(opp_table, opp);
+ _of_clear_opp(opp_table, opp);
opp_debug_remove_one(opp);
kfree(opp);
}
@@ -1613,10 +1585,13 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (IS_ERR(opp_table))
return;
+ if (!assert_single_clk(opp_table))
+ goto put_table;
+
mutex_lock(&opp_table->lock);
list_for_each_entry(iter, &opp_table->opp_list, node) {
- if (iter->rate == freq) {
+ if (iter->rates[0] == freq) {
opp = iter;
break;
}
@@ -1634,6 +1609,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
__func__, freq);
}
+put_table:
/* Drop the reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
}
@@ -1720,26 +1696,31 @@ void dev_pm_opp_remove_all_dynamic(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
-struct dev_pm_opp *_opp_allocate(struct opp_table *table)
+struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- int supply_count, supply_size, icc_size;
+ int supply_count, supply_size, icc_size, clk_size;
/* Allocate space for at least one supply */
- supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
+ supply_count = opp_table->regulator_count > 0 ?
+ opp_table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * supply_count;
- icc_size = sizeof(*opp->bandwidth) * table->path_count;
+ clk_size = sizeof(*opp->rates) * opp_table->clk_count;
+ icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
/* allocate new OPP node and supplies structures */
- opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
-
+ opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
if (!opp)
return NULL;
- /* Put the supplies at the end of the OPP structure as an empty array */
+ /* Put the supplies, bw and clock at the end of the OPP structure */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+
+ opp->rates = (unsigned long *)(opp->supplies + supply_count);
+
if (icc_size)
- opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
+ opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);
+
INIT_LIST_HEAD(&opp->node);
return opp;
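[ _opp_allocate() keeps the OPP and all three trailing arrays in a single kzalloc(), so one kfree() releases everything. A generic sketch of the layout trick, keeping the more strictly aligned array first; the struct is illustrative only, not the kernel's:

	struct blob {
		u64 *vals;
		unsigned long *rates;
	};

	static struct blob *blob_alloc(int nvals, int nrates)
	{
		struct blob *b;

		b = kzalloc(sizeof(*b) + nvals * sizeof(*b->vals) +
			    nrates * sizeof(*b->rates), GFP_KERNEL);
		if (!b)
			return NULL;

		/* carve the trailing storage into the two arrays */
		b->vals = (u64 *)(b + 1);
		b->rates = (unsigned long *)(b->vals + nvals);
		return b;
	}
]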
@@ -1770,15 +1751,57 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+static int _opp_compare_rate(struct opp_table *opp_table,
+ struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+{
+ int i;
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ if (opp1->rates[i] != opp2->rates[i])
+ return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
+ }
+
+ /* Same rates for both OPPs */
+ return 0;
+}
+
+static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
{
- if (opp1->rate != opp2->rate)
- return opp1->rate < opp2->rate ? -1 : 1;
- if (opp1->bandwidth && opp2->bandwidth &&
- opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
- return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
+ int i;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
+ return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
+ }
+
+ /* Same bw for both OPPs */
+ return 0;
+}
+
+/*
+ * Returns
+ * 0: opp1 == opp2
+ * 1: opp1 > opp2
+ * -1: opp1 < opp2
+ */
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
+{
+ int ret;
+
+ ret = _opp_compare_rate(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
+ ret = _opp_compare_bw(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
if (opp1->level != opp2->level)
return opp1->level < opp2->level ? -1 : 1;
+
+ /* Duplicate OPPs */
return 0;
}
@@ -1798,7 +1821,7 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
* loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
- opp_cmp = _opp_compare_key(new_opp, opp);
+ opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
if (opp_cmp > 0) {
*head = &opp->node;
continue;
@@ -1809,8 +1832,8 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->supplies[0].u_volt,
- opp->available, new_opp->rate,
+ __func__, opp->rates[0], opp->supplies[0].u_volt,
+ opp->available, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->available);
/* Should we compare voltages for all regulators here ? */
@@ -1831,7 +1854,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
opp->available = false;
pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
- __func__, opp->required_opps[i]->np, opp->rate);
+ __func__, opp->required_opps[i]->np, opp->rates[0]);
return;
}
}
@@ -1847,7 +1870,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
* should be considered an error by the callers of _opp_add().
*/
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct opp_table *opp_table, bool rate_not_available)
+ struct opp_table *opp_table)
{
struct list_head *head;
int ret;
@@ -1872,7 +1895,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
- __func__, new_opp->rate);
+ __func__, new_opp->rates[0]);
}
/* required-opps not fully initialized yet */
@@ -1913,12 +1936,15 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long tol;
int ret;
+ if (!assert_single_clk(opp_table))
+ return -EINVAL;
+
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return -ENOMEM;
/* populate the opp table */
- new_opp->rate = freq;
+ new_opp->rates[0] = freq;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->supplies[0].u_volt = u_volt;
new_opp->supplies[0].u_volt_min = u_volt - tol;
@@ -1926,7 +1952,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, opp_table, false);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -1948,7 +1974,7 @@ free_opp:
}
/**
- * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * _opp_set_supported_hw() - Set supported platforms
* @opp_table: OPP table for which supported-hw has to be set.
* @versions: Array of hierarchy of versions to match.
* @count: Number of elements in the array.
@@ -1958,87 +1984,42 @@ free_opp:
* OPPs, which are available for those versions, based on its 'opp-supported-hw'
* property.
*/
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions, unsigned int count)
+static int _opp_set_supported_hw(struct opp_table *opp_table,
+ const u32 *versions, unsigned int count)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
if (opp_table->supported_hw)
- return opp_table;
+ return 0;
opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
GFP_KERNEL);
- if (!opp_table->supported_hw) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
- }
+ if (!opp_table->supported_hw)
+ return -ENOMEM;
opp_table->supported_hw_count = count;
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
+ * _opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @opp_table: OPP table returned by _opp_set_supported_hw().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * _opp_set_supported_hw(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->supported_hw);
- opp_table->supported_hw = NULL;
- opp_table->supported_hw_count = 0;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
-
-static void devm_pm_opp_supported_hw_release(void *data)
+static void _opp_put_supported_hw(struct opp_table *opp_table)
{
- dev_pm_opp_put_supported_hw(data);
-}
-
-/**
- * devm_pm_opp_set_supported_hw() - Set supported platforms
- * @dev: Device for which supported-hw has to be set.
- * @versions: Array of hierarchy of versions to match.
- * @count: Number of elements in the array.
- *
- * This is a resource-managed variant of dev_pm_opp_set_supported_hw().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
- unsigned int count)
-{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_set_supported_hw(dev, versions, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_supported_hw_release,
- opp_table);
+ if (opp_table->supported_hw) {
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
+ }
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * _opp_set_prop_name() - Set prop-extn name
* @dev: Device for which the prop-name has to be set.
* @name: name to postfix to properties.
*
@@ -2047,53 +2028,36 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
* which the extension will apply are opp-microvolt and opp-microamp. OPP core
* should postfix the property name with -<name> while looking for them.
*/
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
- if (opp_table->prop_name)
- return opp_table;
-
- opp_table->prop_name = kstrdup(name, GFP_KERNEL);
if (!opp_table->prop_name) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name)
+ return -ENOMEM;
}
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
- * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
+ * _opp_put_prop_name() - Releases resources blocked for prop-name
+ * @opp_table: OPP table returned by _opp_set_prop_name().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * _opp_set_prop_name(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
+static void _opp_put_prop_name(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->prop_name);
- opp_table->prop_name = NULL;
-
- dev_pm_opp_put_opp_table(opp_table);
+ if (opp_table->prop_name) {
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
+ }
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
/**
- * dev_pm_opp_set_regulators() - Set regulator names for the device
+ * _opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
* @names: Array of pointers to the names of the regulator.
* @count: Number of regulators.
@@ -2104,36 +2068,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
+static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
+ const char * const names[])
{
- struct dev_pm_opp_supply *supplies;
- struct opp_table *opp_table;
+ const char * const *temp = names;
struct regulator *reg;
- int ret, i;
+ int count = 0, ret, i;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of regulators */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ if (!count)
+ return -EINVAL;
/* Another CPU that shares the OPP table has set the regulators ? */
if (opp_table->regulators)
- return opp_table;
+ return 0;
opp_table->regulators = kmalloc_array(count,
sizeof(*opp_table->regulators),
GFP_KERNEL);
- if (!opp_table->regulators) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!opp_table->regulators)
+ return -ENOMEM;
for (i = 0; i < count; i++) {
reg = regulator_get_optional(dev, names[i]);
@@ -2149,21 +2106,11 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
opp_table->regulator_count = count;
- supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
- if (!supplies) {
- ret = -ENOMEM;
- goto free_regulators;
- }
-
- mutex_lock(&opp_table->lock);
- opp_table->sod_supplies = supplies;
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = supplies;
- opp_table->set_opp_data->new_opp.supplies = supplies + count;
- }
- mutex_unlock(&opp_table->lock);
+ /* Set generic config_regulators() for single regulators here */
+ if (count == 1)
+ opp_table->config_regulators = _opp_config_regulator_single;
- return opp_table;
+ return 0;
free_regulators:
while (i != 0)
@@ -2172,26 +2119,20 @@ free_regulators:
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-err:
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(ret);
+ return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
/**
- * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
- * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ * _opp_put_regulators() - Releases resources blocked for regulator
+ * @opp_table: OPP table returned from _opp_set_regulators().
*/
-void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+static void _opp_put_regulators(struct opp_table *opp_table)
{
int i;
- if (unlikely(!opp_table))
- return;
-
if (!opp_table->regulators)
- goto put_opp_table;
+ return;
if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
@@ -2201,252 +2142,158 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- mutex_lock(&opp_table->lock);
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = NULL;
- opp_table->set_opp_data->new_opp.supplies = NULL;
- }
-
- kfree(opp_table->sod_supplies);
- opp_table->sod_supplies = NULL;
- mutex_unlock(&opp_table->lock);
-
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
-static void devm_pm_opp_regulators_release(void *data)
+static void _put_clks(struct opp_table *opp_table, int count)
{
- dev_pm_opp_put_regulators(data);
-}
-
-/**
- * devm_pm_opp_set_regulators() - Set regulator names for the device
- * @dev: Device for which regulator name is being set.
- * @names: Array of pointers to the names of the regulator.
- * @count: Number of regulators.
- *
- * This is a resource-managed variant of dev_pm_opp_set_regulators().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
-{
- struct opp_table *opp_table;
+ int i;
- opp_table = dev_pm_opp_set_regulators(dev, names, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ for (i = count - 1; i >= 0; i--)
+ clk_put(opp_table->clks[i]);
- return devm_add_action_or_reset(dev, devm_pm_opp_regulators_release,
- opp_table);
+ kfree(opp_table->clks);
+ opp_table->clks = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_regulators);
/**
- * dev_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * In order to support OPP switching, OPP layer needs to get pointer to the
- * clock for the device. Simple cases work fine without using this routine (i.e.
- * by passing connection-id as NULL), but for a device with multiple clocks
- * available, the OPP core needs to know the exact name of the clk to use.
+ * _opp_set_clknames() - Set clk names for the device
+ * @dev: Device for which clk names are being set.
+ * @names: Clk names.
+ *
+ * In order to support OPP switching, the OPP layer needs to get pointers to the
+ * clocks for the device. Simple cases work fine without using this routine
+ * (i.e. by passing connection-id as NULL), but for a device with multiple
+ * clocks available, the OPP core needs to know the exact names of the clks to
+ * use.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
+static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
+ const char * const names[],
+ config_clks_t config_clks)
{
- struct opp_table *opp_table;
- int ret;
+ const char * const *temp = names;
+ int count = 0, ret, i;
+ struct clk *clk;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of clks */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ /*
+ * This is a special case where we have a single clock, whose connection
+ * id name is NULL, i.e. the first two entries in the array are NULL.
+ */
+ if (!count && !names[1])
+ count = 1;
- /* clk shouldn't be initialized at this point */
- if (WARN_ON(opp_table->clk)) {
- ret = -EBUSY;
- goto err;
- }
+ /* Fail early for invalid configurations */
+ if (!count || (!config_clks && count > 1))
+ return -EINVAL;
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, name);
- if (IS_ERR(opp_table->clk)) {
- ret = dev_err_probe(dev, PTR_ERR(opp_table->clk),
- "%s: Couldn't find clock\n", __func__);
- goto err;
- }
+ /* Another CPU that shares the OPP table has set the clkname ? */
+ if (opp_table->clks)
+ return 0;
- return opp_table;
+ opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
+ GFP_KERNEL);
+ if (!opp_table->clks)
+ return -ENOMEM;
-err:
- dev_pm_opp_put_opp_table(opp_table);
+ /* Find clks for the device */
+ for (i = 0; i < count; i++) {
+ clk = clk_get(dev, names[i]);
+ if (IS_ERR(clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(clk),
+ "%s: Couldn't find clock with name: %s\n",
+ __func__, names[i]);
+ goto free_clks;
+ }
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
+ opp_table->clks[i] = clk;
+ }
-/**
- * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
- * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
- */
-void dev_pm_opp_put_clkname(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
+ opp_table->clk_count = count;
+ opp_table->config_clks = config_clks;
- clk_put(opp_table->clk);
- opp_table->clk = ERR_PTR(-EINVAL);
+ /* Set generic single clk set here */
+ if (count == 1) {
+ if (!opp_table->config_clks)
+ opp_table->config_clks = _opp_config_clk_single;
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
+ /*
+ * We could have just dropped the "clk" field and used "clks"
+ * everywhere. Instead we kept the "clk" field around for
+ * following reasons:
+ *
+ * - avoiding clks[0] everywhere else.
+ * - not running the single clk helpers for the multiple clk use
+ * case by mistake.
+ *
+ * Since this is the single-clk case, just update the clk pointer
+ * too.
+ */
+ opp_table->clk = opp_table->clks[0];
+ }
-static void devm_pm_opp_clkname_release(void *data)
-{
- dev_pm_opp_put_clkname(data);
+ return 0;
+
+free_clks:
+ _put_clks(opp_table, i);
+ return ret;
}
/**
- * devm_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * This is a resource-managed variant of dev_pm_opp_set_clkname().
- *
- * Return: 0 on success and errorno otherwise.
+ * _opp_put_clknames() - Releases resources blocked for clks.
+ * @opp_table: OPP table returned from _opp_set_clknames().
*/
-int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+static void _opp_put_clknames(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
+ if (!opp_table->clks)
+ return;
- opp_table = dev_pm_opp_set_clkname(dev, name);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ opp_table->config_clks = NULL;
+ opp_table->clk = ERR_PTR(-ENODEV);
- return devm_add_action_or_reset(dev, devm_pm_opp_clkname_release,
- opp_table);
+ _put_clks(opp_table, opp_table->clk_count);
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_clkname);
/**
- * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * _opp_set_config_regulators_helper() - Register custom set regulator helper.
* @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
+ * @config_regulators: Custom set regulator helper.
*
- * This is useful to support complex platforms (like platforms with multiple
- * regulators per device), instead of the generic OPP set rate helper.
+ * This is useful to support platforms with multiple regulators per device.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
+ struct device *dev, config_regulators_t config_regulators)
{
- struct dev_pm_set_opp_data *data;
- struct opp_table *opp_table;
-
- if (!set_opp)
- return ERR_PTR(-EINVAL);
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-EBUSY);
- }
-
-	/* Another CPU that shares the OPP table has set the helper ? */
+	/* Has another CPU that shares the OPP table already set the helper? */
- if (opp_table->set_opp)
- return opp_table;
+ if (!opp_table->config_regulators)
+ opp_table->config_regulators = config_regulators;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&opp_table->lock);
- opp_table->set_opp_data = data;
- if (opp_table->sod_supplies) {
- data->old_opp.supplies = opp_table->sod_supplies;
- data->new_opp.supplies = opp_table->sod_supplies +
- opp_table->regulator_count;
- }
- mutex_unlock(&opp_table->lock);
-
- opp_table->set_opp = set_opp;
-
- return opp_table;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
-
-/**
- * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
- * set_opp helper
- * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
- *
- * Release resources blocked for platform specific set_opp helper.
- */
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- opp_table->set_opp = NULL;
-
- mutex_lock(&opp_table->lock);
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
- mutex_unlock(&opp_table->lock);
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
-
-static void devm_pm_opp_unregister_set_opp_helper(void *data)
-{
- dev_pm_opp_unregister_set_opp_helper(data);
+ return 0;
}
/**
- * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
- * @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
- *
- * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
+ * _opp_put_config_regulators_helper() - Releases resources blocked for
+ * config_regulators helper.
+ * @opp_table: OPP table the config_regulators helper was set for.
*
- * Return: 0 on success and errorno otherwise.
+ * Release resources blocked for platform specific config_regulators helper.
*/
-int devm_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
- opp_table);
+ if (opp_table->config_regulators)
+ opp_table->config_regulators = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
-static void _opp_detach_genpd(struct opp_table *opp_table)
+static void _detach_genpd(struct opp_table *opp_table)
{
int index;
@@ -2466,7 +2313,7 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
}
/**
- * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
+ * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
+ * @opp_table: OPP table to attach the genpd(s) to.
* @dev: Consumer device for which the genpd is getting attached.
* @names: Null terminated array of pointers containing names of genpd to attach.
* @virt_devs: Pointer to return the array of virtual devices.
@@ -2487,30 +2334,23 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
* The order of entries in the names array must match the order in which
* "required-opps" are added in DT.
*/
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
- const char * const *names, struct device ***virt_devs)
+static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ const char * const *names, struct device ***virt_devs)
{
- struct opp_table *opp_table;
struct device *virt_dev;
int index = 0, ret = -EINVAL;
const char * const *name = names;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
if (opp_table->genpd_virt_devs)
- return opp_table;
+ return 0;
/*
* If the genpd's OPP table isn't already initialized, parsing of the
-	 * required-opps fail for dev. We should retry this after genpd's OPP
+	 * required-opps fails for dev. We should retry this after the genpd's OPP
* table is added.
*/
- if (!opp_table->required_opp_count) {
- ret = -EPROBE_DEFER;
- goto put_table;
- }
+ if (!opp_table->required_opp_count)
+ return -EPROBE_DEFER;
mutex_lock(&opp_table->genpd_virt_dev_lock);
@@ -2528,8 +2368,8 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
}
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
- if (IS_ERR(virt_dev)) {
- ret = PTR_ERR(virt_dev);
+ if (IS_ERR_OR_NULL(virt_dev)) {
+ ret = PTR_ERR(virt_dev) ? : -ENODEV;
dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
goto err;
}
@@ -2543,73 +2383,230 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
*virt_devs = opp_table->genpd_virt_devs;
mutex_unlock(&opp_table->genpd_virt_dev_lock);
- return opp_table;
+ return 0;
err:
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
unlock:
mutex_unlock(&opp_table->genpd_virt_dev_lock);
+ return ret;
-put_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
/**
- * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
- * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
+ * _opp_detach_genpd() - Detach genpd(s) from the device.
+ * @opp_table: OPP table whose genpd(s) were attached via _opp_attach_genpd().
*
- * This detaches the genpd(s), resets the virtual device pointers, and puts the
- * OPP table.
+ * This detaches the genpd(s) and resets the virtual device pointers.
*/
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
+static void _opp_detach_genpd(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
/*
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
* used in parallel.
*/
mutex_lock(&opp_table->genpd_virt_dev_lock);
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
mutex_unlock(&opp_table->genpd_virt_dev_lock);
-
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
-static void devm_pm_opp_detach_genpd(void *data)
+static void _opp_clear_config(struct opp_config_data *data)
{
- dev_pm_opp_detach_genpd(data);
+ if (data->flags & OPP_CONFIG_GENPD)
+ _opp_detach_genpd(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR)
+ _opp_put_regulators(data->opp_table);
+ if (data->flags & OPP_CONFIG_SUPPORTED_HW)
+ _opp_put_supported_hw(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
+ _opp_put_config_regulators_helper(data->opp_table);
+ if (data->flags & OPP_CONFIG_PROP_NAME)
+ _opp_put_prop_name(data->opp_table);
+ if (data->flags & OPP_CONFIG_CLK)
+ _opp_put_clknames(data->opp_table);
+
+ dev_pm_opp_put_opp_table(data->opp_table);
+ kfree(data);
}
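
Note that _opp_clear_config() releases the individual configurations in the
reverse of the order in which dev_pm_opp_set_config() below acquires them:
clocks are set up first and genpds last, so genpds are detached first and
clocks released last.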
/**
- * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
- * device pointer
- * @dev: Consumer device for which the genpd is getting attached.
- * @names: Null terminated array of pointers containing names of genpd to attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * dev_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
*
- * This is a resource-managed version of dev_pm_opp_attach_genpd().
+ * This allows all device OPP configurations to be performed at once.
*
- * Return: 0 on success and errorno otherwise.
+ * This must be called before any OPPs are initialized for the device. This may
+ * be called multiple times for the same OPP table, for example once for each
+ * CPU sharing the table. This must be balanced by the same number of
+ * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
+ *
+ * This returns a token to the caller, which must be passed to
+ * dev_pm_opp_clear_config() to free the resources later. The value of the
+ * returned token will be >= 1 for success and negative for errors. The minimum
+ * value of 1 is chosen here to make it easy for callers to manage the resource.
*/
-int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names,
- struct device ***virt_devs)
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
struct opp_table *opp_table;
+ struct opp_config_data *data;
+ unsigned int id;
+ int ret;
- opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
- if (IS_ERR(opp_table))
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ opp_table = _add_opp_table(dev, false);
+ if (IS_ERR(opp_table)) {
+ kfree(data);
return PTR_ERR(opp_table);
+ }
+
+ data->opp_table = opp_table;
+ data->flags = 0;
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Configure clocks */
+ if (config->clk_names) {
+ ret = _opp_set_clknames(opp_table, dev, config->clk_names,
+ config->config_clks);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_CLK;
+ } else if (config->config_clks) {
+ /* Don't allow config callback without clocks */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Configure property names */
+ if (config->prop_name) {
+ ret = _opp_set_prop_name(opp_table, config->prop_name);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_PROP_NAME;
+ }
+
+ /* Configure config_regulators helper */
+ if (config->config_regulators) {
+ ret = _opp_set_config_regulators_helper(opp_table, dev,
+ config->config_regulators);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR_HELPER;
+ }
- return devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
- opp_table);
+ /* Configure supported hardware */
+ if (config->supported_hw) {
+ ret = _opp_set_supported_hw(opp_table, config->supported_hw,
+ config->supported_hw_count);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_SUPPORTED_HW;
+ }
+
+ /* Configure supplies */
+ if (config->regulator_names) {
+ ret = _opp_set_regulators(opp_table, dev,
+ config->regulator_names);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR;
+ }
+
+ /* Attach genpds */
+ if (config->genpd_names) {
+ ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
+ config->virt_devs);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_GENPD;
+ }
+
+ ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret)
+ goto err;
+
+ return id;
+
+err:
+ _opp_clear_config(data);
+ return ret;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
+
+/**
+ * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
+ * @token: The token returned by dev_pm_opp_set_config().
+ *
+ * This allows all device OPP configurations to be cleared at once. This must be
+ * called once for each call made to dev_pm_opp_set_config(), in order to free
+ * the OPPs properly.
+ *
+ * Currently the first call itself ends up freeing all the OPP configurations,
+ * while the later ones only drop the OPP table reference. This works well for
+ * now as we would never want to use a half-initialized OPP table and we want
+ * to remove the configurations together.
+ */
+void dev_pm_opp_clear_config(int token)
+{
+ struct opp_config_data *data;
+
+ /*
+ * This lets the callers call this unconditionally and keep their code
+ * simple.
+ */
+ if (unlikely(token <= 0))
+ return;
+
+ data = xa_erase(&opp_configs, token);
+ if (WARN_ON(!data))
+ return;
+
+ _opp_clear_config(data);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);
+
+static void devm_pm_opp_config_release(void *token)
+{
+ dev_pm_opp_clear_config((unsigned long)token);
+}
+
+/**
+ * devm_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
+ *
+ * This allows all device OPP configurations to be performed at once.
+ * This is a resource-managed variant of dev_pm_opp_set_config().
+ *
+ * Return: 0 on success and a negative errno otherwise.
+ */
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
+{
+ int token = dev_pm_opp_set_config(dev, config);
+
+ if (token < 0)
+ return token;
+
+ return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
+ (void *) ((unsigned long) token));
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
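
The token-based flow added above can be summarized with a short consumer
sketch. This is a minimal illustration, not code from this patch: the
"core" clock and "vdd" supply names are hypothetical, and the name arrays
are assumed to be NULL-terminated (as the genpd_names documentation above
states for that field).

#include <linux/pm_opp.h>

static const char * const foo_clk_names[] = { "core", NULL };
static const char * const foo_reg_names[] = { "vdd", NULL };

static int foo_opp_init(struct device *dev)
{
	struct dev_pm_opp_config config = {
		.clk_names = foo_clk_names,
		.regulator_names = foo_reg_names,
	};
	int token;

	/* Returns a token >= 1 on success, a negative errno on failure */
	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;

	/* ... add and use OPPs here ... */

	/* Each successful set must be balanced by one clear */
	dev_pm_opp_clear_config(token);
	return 0;
}

With devm_pm_opp_set_config() the clear is performed automatically when the
device is detached, so the driver only checks the return value.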
/**
* dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
@@ -2795,11 +2792,16 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2866,11 +2868,16 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2897,11 +2904,11 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
opp);
dev_pm_opp_put(opp);
- goto adjust_put_table;
+ goto put_table;
adjust_unlock:
mutex_unlock(&opp_table->lock);
-adjust_put_table:
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 5004335cf0de..3c3506021501 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -41,7 +41,7 @@
* the table if any of the mentioned functions have been invoked in the interim.
*/
int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = NULL;
@@ -76,7 +76,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
freq_table[i].driver_data = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
- *table = &freq_table[0];
+ *opp_table = &freq_table[0];
out:
if (ret)
@@ -94,13 +94,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
- if (!table)
+ if (!opp_table)
return;
- kfree(*table);
- *table = NULL;
+ kfree(*opp_table);
+ *opp_table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 1b6e5c55c3ed..96a30a032c5f 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -74,6 +74,24 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp,
}
}
+static void opp_debug_create_clks(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ char name[12];
+ int i;
+
+ if (opp_table->clk_count == 1) {
+ debugfs_create_ulong("rate_hz", S_IRUGO, pdentry, &opp->rates[0]);
+ return;
+ }
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ snprintf(name, sizeof(name), "rate_hz_%d", i);
+ debugfs_create_ulong(name, S_IRUGO, pdentry, &opp->rates[i]);
+ }
+}
+
static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
@@ -117,10 +135,11 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
* Get directory name for OPP.
*
* - Normally rate is unique to each OPP, use it to get unique opp-name.
- * - For some devices rate isn't available, use index instead.
+	 * - For some devices the rate isn't available or there are multiple
+	 *   rates; use the index instead for them.
*/
- if (likely(opp->rate))
- id = opp->rate;
+ if (likely(opp_table->clk_count == 1 && opp->rates[0]))
+ id = opp->rates[0];
else
id = _get_opp_count(opp_table);
@@ -134,7 +153,6 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
- debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
debugfs_create_u32("level", S_IRUGO, d, &opp->level);
debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
&opp->clock_latency_ns);
@@ -142,6 +160,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
opp->of_name = of_node_full_name(opp->np);
debugfs_create_str("of_name", S_IRUGO, d, (char **)&opp->of_name);
+ opp_debug_create_clks(opp, opp_table, d);
opp_debug_create_supplies(opp, opp_table, d);
opp_debug_create_bw(opp, opp_table, d);
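
For example (hypothetical layout): with a single clock the per-OPP debugfs
directory keeps the familiar rate_hz entry, while a table with three clocks
would instead expose rate_hz_0, rate_hz_1 and rate_hz_2.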
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index eb89c9a75985..605d68673f92 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -242,20 +242,20 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
opp_table->np = opp_np;
_opp_table_alloc_required_tables(opp_table, dev, opp_np);
- of_node_put(opp_np);
}
void _of_clear_opp_table(struct opp_table *opp_table)
{
_opp_table_free_required_tables(opp_table);
+ of_node_put(opp_table->np);
}
/*
* Release all resources previously acquired with a call to
* _of_opp_alloc_required_opps().
*/
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp)
+static void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
{
struct dev_pm_opp **required_opps = opp->required_opps;
int i;
@@ -275,6 +275,12 @@ void _of_opp_free_required_opps(struct opp_table *opp_table,
kfree(required_opps);
}
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
+{
+ _of_opp_free_required_opps(opp_table, opp);
+ of_node_put(opp->np);
+}
+
/* Populate all required OPPs which are part of "required-opps" list */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
struct dev_pm_opp *opp)
@@ -767,7 +773,51 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+static int _read_rate(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ struct property *prop;
+ int i, count, ret;
+ u64 *rates;
+
+ prop = of_find_property(np, "opp-hz", NULL);
+ if (!prop)
+ return -ENODEV;
+
+ count = prop->length / sizeof(u64);
+ if (opp_table->clk_count != count) {
+ pr_err("%s: Count mismatch between opp-hz and clk_count (%d %d)\n",
+ __func__, count, opp_table->clk_count);
+ return -EINVAL;
+ }
+
+ rates = kmalloc_array(count, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ ret = of_property_read_u64_array(np, "opp-hz", rates, count);
+ if (ret) {
+ pr_err("%s: Error parsing opp-hz: %d\n", __func__, ret);
+ } else {
+ /*
+		 * Rate is defined as an unsigned long in the clk API, so cast
+		 * explicitly to its type. This must be fixed once rates are
+		 * guaranteed to be 64 bit in the clk API.
+ */
+ for (i = 0; i < count; i++) {
+ new_opp->rates[i] = (unsigned long)rates[i];
+
+ /* This will happen for frequencies > 4.29 GHz */
+ WARN_ON(new_opp->rates[i] != rates[i]);
+ }
+ }
+
+ kfree(rates);
+
+ return ret;
+}
+
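As an example (hypothetical values), an OPP for a device with two clocks
would carry two values in its opp-hz property, e.g.
opp-hz = /bits/ 64 <600000000 300000000>;, and _read_rate() returns -EINVAL
whenever the number of values differs from the table's clk_count.
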
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
struct device_node *np, bool peak)
{
const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
@@ -780,9 +830,9 @@ static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
return -ENODEV;
count = prop->length / sizeof(u32);
- if (table->path_count != count) {
+ if (opp_table->path_count != count) {
pr_err("%s: Mismatch between %s and paths (%d %d)\n",
- __func__, name, count, table->path_count);
+ __func__, name, count, opp_table->path_count);
return -EINVAL;
}
@@ -808,34 +858,27 @@ out:
return ret;
}
-static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
- struct device_node *np, bool *rate_not_available)
+static int _read_opp_key(struct dev_pm_opp *new_opp,
+ struct opp_table *opp_table, struct device_node *np)
{
bool found = false;
- u64 rate;
int ret;
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (!ret) {
- /*
- * Rate is defined as an unsigned long in clk API, and so
- * casting explicitly to its type. Must be fixed once rate is 64
- * bit guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
+ ret = _read_rate(new_opp, opp_table, np);
+ if (!ret)
found = true;
- }
- *rate_not_available = !!ret;
+ else if (ret != -ENODEV)
+ return ret;
/*
* Bandwidth consists of peak and average (optional) values:
* opp-peak-kBps = <path1_value path2_value>;
* opp-avg-kBps = <path1_value path2_value>;
*/
- ret = _read_bw(new_opp, table, np, true);
+ ret = _read_bw(new_opp, opp_table, np, true);
if (!ret) {
found = true;
- ret = _read_bw(new_opp, table, np, false);
+ ret = _read_bw(new_opp, opp_table, np, false);
}
/* The properties were found but we failed to parse them */
@@ -881,13 +924,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
struct dev_pm_opp *new_opp;
u32 val;
int ret;
- bool rate_not_available = false;
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return ERR_PTR(-ENOMEM);
- ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+ ret = _read_opp_key(new_opp, opp_table, np);
if (ret < 0) {
dev_err(dev, "%s: opp key field not found\n", __func__);
goto free_opp;
@@ -895,14 +937,14 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %lu\n",
- new_opp->rate);
+ dev_dbg(dev, "OPP not supported by hardware: %s\n",
+ of_node_full_name(np));
goto free_opp;
}
new_opp->turbo = of_property_read_bool(np, "turbo-mode");
- new_opp->np = np;
+ new_opp->np = of_node_get(np);
new_opp->dynamic = false;
new_opp->available = true;
@@ -920,7 +962,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (opp_table->is_genpd)
new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
- ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -931,8 +973,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
if (opp_table->suspend_opp) {
- /* Pick the OPP with higher rate as suspend OPP */
- if (new_opp->rate > opp_table->suspend_opp->rate) {
+ /* Pick the OPP with higher rate/bw/level as suspend OPP */
+ if (_opp_compare_key(opp_table, new_opp, opp_table->suspend_opp) == 1) {
opp_table->suspend_opp->suspend = false;
new_opp->suspend = true;
opp_table->suspend_opp = new_opp;
@@ -947,7 +989,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
- __func__, new_opp->turbo, new_opp->rate,
+ __func__, new_opp->turbo, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
new_opp->level);
@@ -1084,7 +1126,7 @@ remove_static_opp:
return ret;
}
-static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _of_add_table_indexed(struct device *dev, int index)
{
struct opp_table *opp_table;
int ret, count;
@@ -1100,7 +1142,7 @@ static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
index = 0;
}
- opp_table = _add_opp_table_indexed(dev, index, getclk);
+ opp_table = _add_opp_table_indexed(dev, index, true);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -1124,11 +1166,11 @@ static void devm_pm_opp_of_table_release(void *data)
dev_pm_opp_of_remove_table(data);
}
-static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _devm_of_add_table_indexed(struct device *dev, int index)
{
int ret;
- ret = _of_add_table_indexed(dev, index, getclk);
+ ret = _of_add_table_indexed(dev, index);
if (ret)
return ret;
@@ -1156,7 +1198,7 @@ static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk
*/
int devm_pm_opp_of_add_table(struct device *dev)
{
- return _devm_of_add_table_indexed(dev, 0, true);
+ return _devm_of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
@@ -1179,7 +1221,7 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table(struct device *dev)
{
- return _of_add_table_indexed(dev, 0, true);
+ return _of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
@@ -1195,7 +1237,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _of_add_table_indexed(dev, index, true);
+ return _of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
@@ -1208,42 +1250,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
*/
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _devm_of_add_table_indexed(dev, index, true);
+ return _devm_of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
-/**
- * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * Register the initial OPP table with the OPP library for given device only
- * using the "operating-points-v2" property. Do not try to get the clk for the
- * device.
- *
- * Return: Refer to dev_pm_opp_of_add_table() for return values.
- */
-int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
-
-/**
- * devm_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * This is a resource-managed variant of dev_pm_opp_of_add_table_noclk().
- */
-int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _devm_of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_noclk);
-
/* CPU device specific helpers */
/**
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 45e3a55239a1..3a6e077df386 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -28,6 +28,27 @@ extern struct mutex opp_table_lock;
extern struct list_head opp_tables, lazy_opp_tables;
+/* OPP Config flags */
+#define OPP_CONFIG_CLK BIT(0)
+#define OPP_CONFIG_REGULATOR BIT(1)
+#define OPP_CONFIG_REGULATOR_HELPER BIT(2)
+#define OPP_CONFIG_PROP_NAME BIT(3)
+#define OPP_CONFIG_SUPPORTED_HW BIT(4)
+#define OPP_CONFIG_GENPD BIT(5)
+
+/**
+ * struct opp_config_data - data for set config operations
+ * @opp_table: OPP table
+ * @flags: OPP config flags
+ *
+ * This structure stores the OPP configuration information for each
+ * configuration request made by the callers.
+ */
+struct opp_config_data {
+ struct opp_table *opp_table;
+ unsigned int flags;
+};
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -58,7 +79,7 @@ extern struct list_head opp_tables, lazy_opp_tables;
* @suspend: true if suspend OPP
* @removed: flag indicating that OPP's reference is dropped by OPP core.
* @pstate: Device's power domain's performance state.
- * @rate: Frequency in hertz
+ * @rates: Frequencies in hertz
* @level: Performance level
* @supplies: Power supplies voltage/current values
* @bandwidth: Interconnect bandwidth values
@@ -81,7 +102,7 @@ struct dev_pm_opp {
bool suspend;
bool removed;
unsigned int pstate;
- unsigned long rate;
+ unsigned long *rates;
unsigned int level;
struct dev_pm_opp_supply *supplies;
@@ -138,7 +159,7 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
- * @current_rate: Currently configured frequency.
+ * @rate_clk_single: Currently configured frequency for single clk.
* @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -149,7 +170,11 @@ enum opp_table_access {
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
- * @clk: Device's clock handle
+ * @config_clks: Platform specific config_clks() callback.
+ * @clks: Device's clock handles, for multiple clocks.
+ * @clk: Device's clock handle, for single clock.
+ * @clk_count: Number of clocks.
+ * @config_regulators: Platform specific config_regulators() callback.
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
@@ -159,9 +184,6 @@ enum opp_table_access {
* @enabled: Set to true if the device's resources are enabled/configured.
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
- * @set_opp: Platform specific set_opp callback
- * @sod_supplies: Set opp data supplies
- * @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -188,7 +210,7 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
- unsigned long current_rate;
+ unsigned long rate_clk_single;
struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
@@ -200,7 +222,11 @@ struct opp_table {
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
+ config_clks_t config_clks;
+ struct clk **clks;
struct clk *clk;
+ int clk_count;
+ config_regulators_t config_regulators;
struct regulator **regulators;
int regulator_count;
struct icc_path **paths;
@@ -209,10 +235,6 @@ struct opp_table {
bool genpd_performance_state;
bool is_genpd;
- int (*set_opp)(struct dev_pm_set_opp_data *data);
- struct dev_pm_opp_supply *sod_supplies;
- struct dev_pm_set_opp_data *set_opp_data;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
char dentry_name[NAME_MAX];
@@ -228,8 +250,8 @@ struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
-int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
@@ -245,14 +267,12 @@ static inline bool lazy_linking_pending(struct opp_table *opp_table)
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
void _of_clear_opp_table(struct opp_table *opp_table);
struct opp_table *_managed_opp(struct device *dev, int index);
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp);
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp);
#else
static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
static inline void _of_clear_opp_table(struct opp_table *opp_table) {}
static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
-static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp) {}
+static inline void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) {}
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index bd4771f388ab..8f3f13fbbb25 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -36,11 +36,15 @@ struct ti_opp_supply_optimum_voltage_table {
* @vdd_table: Optimized voltage mapping table
* @num_vdd_table: number of entries in vdd_table
* @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ * @old_supplies: Placeholder for supplies information for old OPP.
+ * @new_supplies: Placeholder for supplies information for new OPP.
*/
struct ti_opp_supply_data {
struct ti_opp_supply_optimum_voltage_table *vdd_table;
u32 num_vdd_table;
u32 vdd_absolute_max_voltage_uv;
+ struct dev_pm_opp_supply old_supplies[2];
+ struct dev_pm_opp_supply new_supplies[2];
};
static struct ti_opp_supply_data opp_data;
@@ -266,27 +270,32 @@ static int _opp_set_voltage(struct device *dev,
return 0;
}
-/**
- * ti_opp_supply_set_opp() - do the opp supply transition
- * @data: information on regulators and new and old opps provided by
- * opp core to use in transition
- *
- * Return: If successful, 0, else appropriate error value.
- */
-static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+/* Do the opp supply transition */
+static int ti_opp_config_regulators(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
{
- struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
- struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
- struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
- struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
- struct device *dev = data->dev;
- unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
- struct clk *clk = data->clk;
- struct regulator *vdd_reg = data->regulators[0];
- struct regulator *vbb_reg = data->regulators[1];
+ struct dev_pm_opp_supply *old_supply_vdd = &opp_data.old_supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &opp_data.old_supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &opp_data.new_supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &opp_data.new_supplies[1];
+ struct regulator *vdd_reg = regulators[0];
+ struct regulator *vbb_reg = regulators[1];
+ unsigned long old_freq, freq;
int vdd_uv;
int ret;
+ /* We must have two regulators here */
+ WARN_ON(count != 2);
+
+ /* Fetch supplies and freq information from OPP core */
+ ret = dev_pm_opp_get_supplies(new_opp, opp_data.new_supplies);
+ WARN_ON(ret);
+
+ old_freq = dev_pm_opp_get_freq(old_opp);
+ freq = dev_pm_opp_get_freq(new_opp);
+ WARN_ON(!old_freq || !freq);
+
vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
new_supply_vdd->u_volt);
@@ -303,39 +312,24 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
goto restore_voltage;
- }
-
- /* Change frequency */
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
- __func__, old_freq, freq);
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- goto restore_voltage;
- }
-
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
+ } else {
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
"vdd");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
}
return 0;
-restore_freq:
- ret = clk_set_rate(clk, old_freq);
- if (ret)
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
restore_voltage:
+ /* Fetch old supplies information only if required */
+ ret = dev_pm_opp_get_supplies(old_opp, opp_data.old_supplies);
+ WARN_ON(ret);
+
/* This shouldn't harm even if the voltages weren't updated earlier */
if (old_supply_vdd->u_volt) {
ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
@@ -405,9 +399,8 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
return ret;
}
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
- ti_opp_supply_set_opp));
- if (ret)
+ ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
+ if (ret < 0)
_free_optimized_voltages(dev, &opp_data);
return ret;
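
The callback above can also be registered through the combined configuration
API introduced in this series. The following is a minimal sketch, not code
from this patch: the supply names "vdd" and "vbb" are assumed to match the
device tree, and the name array is assumed to be NULL-terminated. The driver
itself uses the dev_pm_opp_set_config_regulators() helper shown above instead.

#include <linux/pm_opp.h>

static const char * const ti_reg_names[] = { "vdd", "vbb", NULL };

static int ti_opp_register(struct device *cpu_dev)
{
	struct dev_pm_opp_config config = {
		.regulator_names = ti_reg_names,
		.config_regulators = ti_opp_config_regulators,
	};

	/* Returns a token >= 1 on success; pair with dev_pm_opp_clear_config() */
	return dev_pm_opp_set_config(cpu_dev, &config);
}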
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9be007c9420f..f223afe47d10 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -268,7 +268,7 @@ static int ioc_count;
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
-* This was was copied from sba_iommu.c. Don't try to unify
+* This was copied from sba_iommu.c. Don't try to unify
* the two resource managers unless a way to have different
* allocation policies is also adjusted. We'd like to avoid
* I/O TLB thrashing by having resource allocation policy
@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
}
}
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}
static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev)
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 732b516c7bf8..afc6e66ddc31 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1476,9 +1476,13 @@ lba_driver_probe(struct parisc_device *dev)
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap(dev->hpa.start, 4096);
+ void __iomem *addr;
int max;
+ addr = ioremap(dev->hpa.start, 4096);
+ if (addr == NULL)
+ return -ENOMEM;
+
/* Read HW Rev First */
func_class = READ_REG32(addr + LBA_FCLASS);
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 1e4a5663d011..d4be9d2ee74d 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -646,7 +646,7 @@ int lcd_print( const char *str )
cancel_delayed_work_sync(&led_task);
/* copy display string to buffer for procfs */
- strlcpy(lcd_text, str, sizeof(lcd_text));
+ strscpy(lcd_text, str, sizeof(lcd_text));
/* Set LCD Cursor to 1st character */
gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 133c73207782..55c028af4bd9 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -121,6 +121,9 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
+config PCI_DOE
+ bool
+
config PCI_ECAM
bool
@@ -164,6 +167,11 @@ config PCI_PASID
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
depends on ZONE_DEVICE
+ #
+ # The need for the scatterlist DMA bus address flag means PCI P2PDMA
+ # requires 64bit
+ #
+ depends on 64BIT
select GENERIC_ALLOCATOR
help
	  Enables drivers to do PCI peer-to-peer transactions to and from
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0da6b1ebc694..2680e4c92f0a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
+obj-$(CONFIG_PCI_DOE) += doe.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index b8d96d38064d..d1c5fcf00a8a 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -237,7 +237,7 @@ config PCIE_ROCKCHIP_EP
config PCIE_MEDIATEK
tristate "MediaTek PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
@@ -293,7 +293,7 @@ config PCI_HYPERV_INTERFACE
config PCI_LOONGSON
bool "LOONGSON PCI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST
- depends on OF
+ depends on OF || ACPI
depends on PCI_QUIRKS
default MACH_LOONGSON64
help
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 52767f26048f..13c4032ca379 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -243,7 +243,6 @@ err_phy:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
struct cdns_pcie *pcie = dev_get_drvdata(dev);
@@ -266,9 +265,8 @@ static int cdns_pcie_resume_noirq(struct device *dev)
return 0;
}
-#endif
const struct dev_pm_ops cdns_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
- cdns_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+ cdns_pcie_resume_noirq)
};
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dfcdeb432dc8..38462ed11d07 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -178,7 +178,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
-static int dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -202,7 +202,7 @@ static const struct irq_domain_ops intx_domain_ops = {
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned long val;
@@ -224,7 +224,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
return 1;
}
-static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret, i, count, num_ctrls;
@@ -255,8 +255,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct dra7xx_pcie *dra7xx;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
- struct pcie_port *pp;
unsigned long reg;
u32 bit;
@@ -344,7 +344,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -475,7 +475,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
{
int ret;
struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
pp->irq = platform_get_irq(pdev, 1);
@@ -483,7 +483,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return pp->irq;
/* MSI IRQ is muxed */
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
@@ -862,7 +862,6 @@ err_link:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
@@ -919,7 +918,6 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
return 0;
}
-#endif
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
@@ -940,9 +938,9 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)
}
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
- dra7xx_pcie_resume_noirq)
+ SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
};
static struct platform_driver dra7xx_pcie_driver = {
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 467c8d1cd7e4..ec5611005566 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -249,7 +249,7 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)
return (val & PCIE_ELBI_XMLH_LINKUP);
}
-static int exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
@@ -258,9 +258,8 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
exynos_pcie_assert_core_reset(ep);
- phy_reset(ep->phy);
- phy_power_on(ep->phy);
phy_init(ep->phy);
+ phy_power_on(ep->phy);
exynos_pcie_deassert_core_reset(ep);
exynos_pcie_enable_irq_pulse(ep);
@@ -276,7 +275,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
struct platform_device *pdev)
{
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -292,7 +291,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
}
pp->ops = &exynos_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -390,7 +389,7 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+static int exynos_pcie_suspend_noirq(struct device *dev)
{
struct exynos_pcie *ep = dev_get_drvdata(dev);
@@ -402,11 +401,11 @@ static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+static int exynos_pcie_resume_noirq(struct device *dev)
{
struct exynos_pcie *ep = dev_get_drvdata(dev);
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
@@ -421,8 +420,8 @@ static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops exynos_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
- exynos_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
};
static const struct of_device_id exynos_pcie_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 7a285fb0f619..6e5debdbc55b 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -67,6 +67,7 @@ struct imx6_pcie {
struct dw_pcie *pci;
int reset_gpio;
bool gpio_active_high;
+ bool link_is_up;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
@@ -146,6 +147,31 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MM);
+ return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+ unsigned int mask, val;
+
+ if (imx6_pcie->drvdata->variant == IMX8MQ &&
+ imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -271,6 +297,134 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ /*
+		 * The PHY initialization is done in the PHY
+		 * driver; nothing else to do here.
+ */
+ break;
+ case IMX8MQ:
+ /*
+		 * TODO: Currently this code assumes an external
+		 * oscillator is being used
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+		 * According to the datasheet, PCIE_VPH is suggested
+		 * to be 1.8V. If PCIE_VPH is supplied with 3.3V,
+		 * VREG_BYPASS should be cleared to zero.
+ */
+ if (imx6_pcie->vph &&
+ regulator_get_voltage(imx6_pcie->vph) > 3000000)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+ IMX6SX_GPR12_PCIE_RX_EQ_2);
+ fallthrough;
+ default:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ break;
+ }
+
+ imx6_pcie_configure_type(imx6_pcie);
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u16 val;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return 0;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
+
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u16 tmp;
@@ -367,61 +521,6 @@ static int imx6_pcie_attach_pd(struct device *dev)
return 0;
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- }
-
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- int ret = regulator_disable(imx6_pcie->vpcie);
-
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
-
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
-}
-
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
-{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MM);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
-}
-
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -482,38 +581,44 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
return ret;
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
{
- u32 val;
- struct device *dev = imx6_pcie->pci->dev;
-
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
- IOMUXC_GPR22, val,
- val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
- PHY_PLL_LOCK_WAIT_USLEEP_MAX,
- PHY_PLL_LOCK_WAIT_TIMEOUT))
- dev_err(dev, "PCIe PLL lock timeout\n");
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX6QP:
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD,
+ IMX6Q_GPR1_PCIE_TEST_PD);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ case IMX8MM:
+ case IMX8MQ:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
+ default:
+ break;
+ }
}
-static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
int ret;
- if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
- ret = regulator_enable(imx6_pcie->vpcie);
- if (ret) {
- dev_err(dev, "failed to enable vpcie regulator: %d\n",
- ret);
- return;
- }
- }
-
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
+ return ret;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -534,25 +639,75 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
goto err_ref_clk;
}
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+ return 0;
+
+err_ref_clk:
+ clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+
+ return ret;
+}
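
The new imx6_pcie_clk_enable() above unwinds with stacked goto labels so each failure path releases only what was already acquired, in reverse order. A minimal standalone sketch of that idiom, with invented resource names standing in for the pcie_phy/pcie_bus/pcie clocks:

#include <assert.h>
#include <stdio.h>

static int acquire(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "%s failed\n", name);
		return -1;
	}
	return 0;
}

static void release(const char *name)
{
	printf("released %s\n", name);
}

static int enable_all(int fail_at)
{
	int ret;

	ret = acquire("phy", fail_at == 1);
	if (ret)
		return ret;		/* nothing acquired yet */
	ret = acquire("bus", fail_at == 2);
	if (ret)
		goto err_phy;
	ret = acquire("core", fail_at == 3);
	if (ret)
		goto err_bus;
	return 0;

err_bus:
	release("bus");
err_phy:
	release("phy");
	return ret;
}

int main(void)
{
	assert(enable_all(0) == 0);	/* everything acquired */
	assert(enable_all(3) != 0);	/* "core" fails; "bus" and "phy" released */
	return 0;
}
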
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ imx6_pcie_disable_ref_clk(imx6_pcie);
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
switch (imx6_pcie->drvdata->variant) {
+ case IMX7D:
+ case IMX8MQ:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
case IMX8MM:
- if (phy_power_on(imx6_pcie->phy))
- dev_err(dev, "unable to power on PHY\n");
+ reset_control_assert(imx6_pcie->apps_reset);
break;
- default:
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
- /* allow the clocks to stabilize */
- usleep_range(200, 500);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
+}
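
imx6_pcie_assert_core_reset() leans entirely on regmap_update_bits(), whose contract is that only the bits set in the mask argument change; writing a zero value under a set mask (the IMX6Q REF_CLK_EN case above) clears those bits. A standalone model of that read-modify-write semantic; the bit positions mirror the 1 << 18 / 0 << 16 writes in the IMX6Q case:

#include <assert.h>
#include <stdint.h>

static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t gpr1 = 0xffffffff;

	gpr1 = update_bits(gpr1, 1u << 16, 0 << 16);	/* clear REF_CLK_EN */
	gpr1 = update_bits(gpr1, 1u << 18, 1u << 18);	/* set TEST_PD */
	assert(!(gpr1 & (1u << 16)));
	assert(gpr1 & (1u << 18));
	return 0;
}
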
+
+static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
break;
- case IMX8MM:
- if (phy_init(imx6_pcie->phy))
- dev_err(dev, "waiting for phy ready timeout!\n");
- break;
case IMX7D:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -588,6 +743,7 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
usleep_range(200, 500);
break;
case IMX6Q: /* Nothing to do */
+ case IMX8MM:
break;
}
@@ -600,153 +756,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
msleep(100);
}
- return;
-
-err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- ret = regulator_disable(imx6_pcie->vpcie);
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
-}
-
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
-{
- unsigned int mask, val;
-
- if (imx6_pcie->drvdata->variant == IMX8MQ &&
- imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- }
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
-}
-
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
-{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
-
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
-
- imx6_pcie_configure_type(imx6_pcie);
-}
-
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
-{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
- int mult, div;
- u16 val;
-
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
- return 0;
-
- switch (phy_rate) {
- case 125000000:
- /*
- * The default settings of the MPLL are for a 125MHz input
- * clock, so no need to reconfigure anything in that case.
- */
- return 0;
- case 100000000:
- mult = 25;
- div = 0;
- break;
- case 200000000:
- mult = 25;
- div = 1;
- break;
- default:
- dev_err(imx6_pcie->pci->dev,
- "Unsupported PHY reference clock rate %lu\n", phy_rate);
- return -EINVAL;
- }
-
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
- val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
- PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
- val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
- val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
-
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
- val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
- PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
- val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
- val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
-
return 0;
}
@@ -789,6 +798,25 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
}
}
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MM:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
static int imx6_pcie_start_link(struct dw_pcie *pci)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
@@ -802,21 +830,26 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
+ dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- dw_pcie_wait_for_link(pci);
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
- if (pci->link_gen == 2) {
- /* Allow Gen2 mode after the link is up. */
+ if (pci->link_gen > 1) {
+ /* Allow faster modes after the link is up */
+ dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+ tmp |= pci->link_gen;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -826,6 +859,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
if (imx6_pcie->drvdata->flags &
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
@@ -846,34 +880,110 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
}
/* Make sure link training is finished as well! */
- dw_pcie_wait_for_link(pci);
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
} else {
- dev_info(dev, "Link: Gen2 disabled\n");
+ dev_info(dev, "Link: Only Gen1 is enabled\n");
}
+ imx6_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
+ imx6_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
- return ret;
+ return 0;
+}
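
The retraining sequence above first caps the advertised Supported Link Speeds field to Gen1, brings the link up, then rewrites the cap with pci->link_gen before triggering a directed speed change. A sketch of just the LNKCAP field handling; the starting register value is made up:

#include <assert.h>
#include <stdint.h>

#define PCI_EXP_LNKCAP_SLS	0x0000000f	/* Supported Link Speeds */

static uint32_t cap_link_speed(uint32_t lnkcap, uint32_t gen)
{
	lnkcap &= ~PCI_EXP_LNKCAP_SLS;
	return lnkcap | (gen & PCI_EXP_LNKCAP_SLS);
}

int main(void)
{
	uint32_t lnkcap = 0x00477d03;	/* made-up register value, SLS = 3 */

	lnkcap = cap_link_speed(lnkcap, 1);	/* train at Gen1 first */
	assert((lnkcap & PCI_EXP_LNKCAP_SLS) == 1);
	lnkcap = cap_link_speed(lnkcap, 3);	/* raise the cap once link is up */
	assert((lnkcap & PCI_EXP_LNKCAP_SLS) == 3);
	return 0;
}
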
+
+static void imx6_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+
+ /* Turn off PCIe LTSSM */
+ imx6_pcie_ltssm_disable(dev);
}
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ int ret;
+
+ if (imx6_pcie->vpcie) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return ret;
+ }
+ }
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
+
+ ret = imx6_pcie_clk_enable(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
+ goto err_reg_disable;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_power_on(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "pcie PHY power up failed\n");
+ goto err_clk_disable;
+ }
+ }
+
+ ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ if (ret < 0) {
+ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+ goto err_phy_off;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_init(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "waiting for PHY ready timeout!\n");
+ goto err_phy_off;
+ }
+ }
imx6_setup_phy_mpll(imx6_pcie);
return 0;
+
+err_phy_off:
+ if (imx6_pcie->phy)
+ phy_power_off(imx6_pcie->phy);
+err_clk_disable:
+ imx6_pcie_clk_disable(imx6_pcie);
+err_reg_disable:
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
+ return ret;
+}
+
+static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ if (imx6_pcie->phy) {
+ if (phy_power_off(imx6_pcie->phy))
+ dev_err(pci->dev, "unable to power off PHY\n");
+ phy_exit(imx6_pcie->phy);
+ }
+ imx6_pcie_clk_disable(imx6_pcie);
+
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
}
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
@@ -884,26 +994,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx6_pcie_start_link,
};
-#ifdef CONFIG_PM_SLEEP
-static void imx6_pcie_ltssm_disable(struct device *dev)
-{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MM:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- default:
- dev_err(dev, "ltssm_disable not supported\n");
- }
-}
-
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
@@ -941,49 +1031,17 @@ pm_turnoff_sleep:
usleep_range(1000, 10000);
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
-{
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- case IMX8MQ:
- case IMX8MM:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
- default:
- break;
- }
-}
-
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_ltssm_disable(dev);
- imx6_pcie_clk_disable(imx6_pcie);
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- if (phy_power_off(imx6_pcie->phy))
- dev_err(dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
- break;
- default:
- break;
- }
+ imx6_pcie_stop_link(imx6_pcie->pci);
+ imx6_pcie_host_exit(pp);
return 0;
}
@@ -992,27 +1050,25 @@ static int imx6_pcie_resume_noirq(struct device *dev)
{
int ret;
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct pcie_port *pp = &imx6_pcie->pci->pp;
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
+ ret = imx6_pcie_host_init(pp);
+ if (ret)
+ return ret;
dw_pcie_setup_rc(pp);
- ret = imx6_pcie_start_link(imx6_pcie->pci);
- if (ret < 0)
- dev_info(dev, "pcie link is down after resume.\n");
+ if (imx6_pcie->link_is_up)
+ imx6_pcie_start_link(imx6_pcie->pci);
return 0;
}
-#endif
static const struct dev_pm_ops imx6_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
};
static int imx6_pcie_probe(struct platform_device *pdev)
@@ -1291,7 +1347,7 @@ static struct platform_driver imx6_pcie_driver = {
static void imx6_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
/* Bus parent is the PCI bridge, its parent is this platform driver */
if (!bus->dev.parent || !bus->dev.parent->parent)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index d10e5fd0f83c..78818853af9e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -109,7 +109,7 @@ struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
- unsigned int version;
+ u32 version;
};
struct keystone_pcie {
@@ -147,7 +147,7 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -167,7 +167,7 @@ static void ks_pcie_msi_irq_ack(struct irq_data *data)
static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
u64 msi_target;
@@ -192,7 +192,7 @@ static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
static void ks_pcie_msi_mask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -216,7 +216,7 @@ static void ks_pcie_msi_mask(struct irq_data *data)
static void ks_pcie_msi_unmask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -247,7 +247,7 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.irq_unmask = ks_pcie_msi_unmask,
};
-static int ks_pcie_msi_host_init(struct pcie_port *pp)
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
@@ -390,7 +390,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
u64 start, end;
struct resource *mem;
int i;
@@ -428,7 +428,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
@@ -456,7 +456,7 @@ static struct pci_ops ks_child_pcie_ops = {
*/
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -574,7 +574,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 vector, reg, pos;
@@ -799,7 +799,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
return 0;
}
-static int __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -1069,19 +1069,19 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
- .version = 0x365A,
+ .version = DW_PCIE_VER_365A,
};
static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
.host_ops = &ks_pcie_am654_host_ops,
.mode = DW_PCIE_RC_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
.ep_ops = &ks_pcie_am654_ep_ops,
.mode = DW_PCIE_EP_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct of_device_id ks_pcie_of_match[] = {
@@ -1114,12 +1114,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct device_link **link;
struct gpio_desc *gpiod;
struct resource *res;
- unsigned int version;
void __iomem *base;
u32 num_viewport;
struct phy **phy;
u32 num_lanes;
char name[10];
+ u32 version;
int ret;
int irq;
int i;
@@ -1233,7 +1233,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- if (pci->version >= 0x480A)
+ if (dw_pcie_ver_is_ge(pci, 480A))
ret = ks_pcie_am654_set_mode(dev, mode);
else
ret = ks_pcie_set_mode(dev);
@@ -1324,7 +1324,7 @@ static struct platform_driver ks_pcie_driver __refdata = {
.remove = __exit_p(ks_pcie_remove),
.driver = {
.name = "keystone-pcie",
- .of_match_table = of_match_ptr(ks_pcie_of_match),
+ .of_match_table = ks_pcie_of_match,
},
};
builtin_platform_driver(ks_pcie_driver);
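
Keystone's bare 0x365A/0x490A magic numbers become named DW_PCIE_VER_* constants, and the >= test goes through dw_pcie_ver_is_ge(). The constants stay numerically ordered, so a plain integer comparison still works; below is an invented packing that illustrates the idea (the real constants' exact encoding lives in pcie-designware.h):

#include <assert.h>
#include <stdint.h>

/* Invented packing for illustration only: four version characters in a
 * u32, most significant first, so ordered comparison matches "is newer". */
#define VER(a, b, c, d) \
	(((uint32_t)(a) << 24) | ((uint32_t)(b) << 16) | \
	 ((uint32_t)(c) << 8)  |  (uint32_t)(d))
#define VER_480A VER('4', '8', '0', 'A')
#define VER_490A VER('4', '9', '0', 'A')

int main(void)
{
	uint32_t ip_version = VER_490A;	/* e.g. an AM654 controller */

	assert(ip_version >= VER_480A);	/* mirrors dw_pcie_ver_is_ge(pci, 480A) */
	return 0;
}
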
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 39f4664bd84c..ad99707b3b99 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -32,15 +32,6 @@ struct ls_pcie_ep {
const struct ls_pcie_ep_drvdata *drvdata;
};
-static int ls_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
- .start_link = ls_pcie_establish_link,
-};
-
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
@@ -106,19 +97,16 @@ static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
.func_offset = 0x20000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
.func_offset = 0x8000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct of_device_id ls_pcie_ep_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 6a4f0619bb1c..879b8692f96a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -74,7 +74,7 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
-static int ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index f44bf347904a..c1527693bed9 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -370,7 +370,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int meson_pcie_host_init(struct pcie_port *pp)
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index e8afa50129a8..b8cb77c9c4bd 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -217,7 +217,7 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
unsigned int busnr = bus->number;
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
@@ -245,7 +245,7 @@ static struct pci_ops al_child_pci_ops = {
static void al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
- struct pcie_port *pp = &pcie->pci->pp;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
u32 cfg_control_offset;
u8 subordinate_bus;
@@ -289,7 +289,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
}
-static int al_pcie_host_init(struct pcie_port *pp)
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct al_pcie *pcie = to_al_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 4e2552dcf982..dc469ef8e99b 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -166,7 +166,7 @@ static int armada8k_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
{
u32 reg;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -233,7 +233,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -343,7 +343,7 @@ static struct platform_driver armada8k_pcie_driver = {
.probe = armada8k_pcie_probe,
.driver = {
.name = "armada8k-pcie",
- .of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ .of_match_table = armada8k_pcie_of_match,
.suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 2f15441770e1..98102079e26d 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -97,7 +97,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct dw_pcie_ep *ep = &pci->ep;
switch (artpec6_pcie->mode) {
@@ -315,7 +315,7 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
usleep_range(100, 200);
}
-static int artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 0eda8236c125..83ddb190292e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -154,22 +154,25 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_barno bar, dma_addr_t cpu_addr,
- enum dw_pcie_as_type as_type)
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ dma_addr_t cpu_addr, enum pci_barno bar)
{
int ret;
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+ free_win = ep->bar_to_atu[bar];
+
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
- as_type);
+ ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -185,8 +188,9 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size)
{
- u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 free_win;
+ int ret;
free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
if (free_win >= pci->num_ob_windows) {
@@ -194,8 +198,10 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+ if (ret)
+ return ret;
set_bit(free_win, ep->ob_window_map);
ep->outbound_addr[free_win] = phys_addr;
@@ -213,38 +219,40 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
clear_bit(atu_index, ep->ib_window_map);
ep->epf_bar[bar] = NULL;
+ ep->bar_to_atu[bar] = 0;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- enum dw_pcie_as_type as_type;
- u32 reg;
unsigned int func_offset = 0;
+ int ret, type;
+ u32 reg;
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
if (!(flags & PCI_BASE_ADDRESS_SPACE))
- as_type = DW_PCIE_AS_MEM;
+ type = PCIE_ATU_TYPE_MEM;
else
- as_type = DW_PCIE_AS_IO;
+ type = PCIE_ATU_TYPE_IO;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
- epf_bar->phys_addr, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
if (ret)
return ret;
+ if (ep->epf_bar[bar])
+ return 0;
+
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
@@ -289,7 +297,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if (ret < 0)
return;
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
@@ -435,8 +443,7 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (pci->ops && pci->ops->stop_link)
- pci->ops->stop_link(pci);
+ dw_pcie_stop_link(pci);
}
static int dw_pcie_ep_start(struct pci_epc *epc)
@@ -444,10 +451,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops || !pci->ops->start_link)
- return -EINVAL;
-
- return pci->ops->start_link(pci);
+ return dw_pcie_start_link(pci);
}
static const struct pci_epc_features*
@@ -699,17 +703,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (!pci->dbi_base2) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- if (!res)
+ if (!res) {
pci->dbi_base2 = pci->dbi_base + SZ_4K;
- else {
+ } else {
pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
}
}
- dw_pcie_iatu_detect(pci);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
@@ -717,17 +719,17 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ib_windows),
- sizeof(long),
- GFP_KERNEL);
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
if (!ep->ib_window_map)
return -ENOMEM;
- ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ob_windows),
- sizeof(long),
- GFP_KERNEL);
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
if (!ep->ob_window_map)
return -ENOMEM;
@@ -780,8 +782,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->window.page_size);
if (!ep->msi_mem) {
+ ret = -ENOMEM;
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
- return -ENOMEM;
+ goto err_exit_epc_mem;
}
if (ep->ops->get_features) {
@@ -790,6 +793,19 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
- return dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret)
+ goto err_free_epc_mem;
+
+ return 0;
+
+err_free_epc_mem:
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
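
dw_pcie_ep_set_bar() now remembers which inbound window a BAR got (bar_to_atu[]), so re-setting the same BAR reuses its window instead of burning a fresh one. A standalone model of that bookkeeping; note the sketch stores win + 1 so window 0 stays distinguishable from "unassigned", whereas the patch stores the raw index:

#include <assert.h>

#define NUM_IB_WINDOWS 8

static unsigned int win_map;	/* bit i set => inbound window i in use */
static int bar_to_win[6];	/* 0 = unassigned; stores window + 1 */

static int find_first_zero(unsigned int map)
{
	int i;

	for (i = 0; i < NUM_IB_WINDOWS; i++)
		if (!(map & (1u << i)))
			return i;
	return NUM_IB_WINDOWS;
}

static int map_bar(int bar)
{
	int win = bar_to_win[bar] ? bar_to_win[bar] - 1
				  : find_first_zero(win_map);

	if (win >= NUM_IB_WINDOWS)
		return -1;		/* no free inbound window */
	win_map |= 1u << win;
	bar_to_win[bar] = win + 1;
	return win;
}

int main(void)
{
	int first = map_bar(0);

	assert(map_bar(0) == first);	/* re-setting BAR 0 reuses its window */
	assert(map_bar(1) != first);	/* a new BAR gets a fresh window */
	return 0;
}
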
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 9979302532b7..7746f94a715f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -53,7 +53,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
};
/* MSI int handler */
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
int i, pos;
unsigned long val;
@@ -88,7 +88,7 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
static void dw_chained_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
chained_irq_enter(chip, desc);
@@ -100,7 +100,7 @@ static void dw_chained_msi_isr(struct irq_desc *desc)
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
@@ -123,7 +123,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,
static void dw_pci_bottom_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -142,7 +142,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
static void dw_pci_bottom_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -161,7 +161,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
static void dw_pci_bottom_ack(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
@@ -185,7 +185,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
u32 i;
int bit;
@@ -213,7 +213,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
@@ -229,7 +229,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
.free = dw_pcie_irq_domain_free,
};
-int dw_pcie_allocate_domains(struct pcie_port *pp)
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
@@ -255,10 +255,15 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return 0;
}
-static void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
- if (pp->msi_irq)
- irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ NULL, NULL);
+ }
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
@@ -267,12 +272,13 @@ static void dw_pcie_free_msi(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (pp->msi_page)
+ __free_page(pp->msi_page);
}
}
-static void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
@@ -285,7 +291,112 @@ static void dw_pcie_msi_init(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
-int dw_pcie_host_init(struct pcie_port *pp)
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 ctrl, max_vectors;
+ int irq;
+
+ /* Parse any "msiX" IRQs described in the devicetree */
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ char msi_name[] = "msiX";
+
+ msi_name[3] = '0' + ctrl;
+ irq = platform_get_irq_byname_optional(pdev, msi_name);
+ if (irq == -ENXIO)
+ break;
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "Failed to parse MSI IRQ '%s'\n",
+ msi_name);
+
+ pp->msi_irq[ctrl] = irq;
+ }
+
+ /* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
+ if (ctrl == 0)
+ return -ENXIO;
+
+ max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+ if (pp->num_vectors > max_vectors) {
+ dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+ max_vectors);
+ pp->num_vectors = max_vectors;
+ }
+ if (!pp->num_vectors)
+ pp->num_vectors = max_vectors;
+
+ return 0;
+}
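
dw_pcie_parse_split_msi_irq() probes devicetree entries named "msi0".."msi7" by patching the last byte of a "msiX" template, then clamps num_vectors to what the discovered controllers can serve. A compact model, assuming the driver's 32 vectors per controller:

#include <assert.h>
#include <string.h>

#define MAX_MSI_IRQS_PER_CTRL 32	/* per-controller vector count */

int main(void)
{
	unsigned int ctrl, found = 0, num_vectors = 256;

	for (ctrl = 0; ctrl < 8; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;	/* "msi0", "msi1", ... */
		if (ctrl == 2)
			assert(strcmp(msi_name, "msi2") == 0);
		if (ctrl >= 3)
			break;			/* pretend only msi0..msi2 exist */
		found++;
	}

	/* clamp the requested vectors to what the split IRQs can serve */
	if (num_vectors > found * MAX_MSI_IRQS_PER_CTRL)
		num_vectors = found * MAX_MSI_IRQS_PER_CTRL;
	assert(num_vectors == 96);
	return 0;
}
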
+
+static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+ u32 ctrl, num_ctrls;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
+ if (!pp->msi_irq[0]) {
+ ret = dw_pcie_parse_split_msi_irq(pp);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ }
+
+ if (!pp->num_vectors)
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ if (!pp->msi_irq[0]) {
+ pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq[0] < 0) {
+ pp->msi_irq[0] = platform_get_irq(pdev, 0);
+ if (pp->msi_irq[0] < 0)
+ return pp->msi_irq[0];
+ }
+ }
+
+ dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
+
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ return ret;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr, pp);
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+ pp->msi_page = alloc_page(GFP_DMA32);
+ pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dev, pp->msi_data);
+ if (ret) {
+ dev_err(pci->dev, "Failed to map MSI data\n");
+ __free_page(pp->msi_page);
+ pp->msi_page = NULL;
+ pp->msi_data = 0;
+ dw_pcie_free_msi(pp);
+
+ return ret;
+ }
+
+ return 0;
+}
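
The MSI target is now a dedicated page allocated with GFP_DMA32, so its bus address fits the 32-bit doorbell that endpoints with only 32-bit MSI support can reach; the address is then split across the ADDR_LO/ADDR_HI registers. A trivial sketch of the split, with an arbitrary address:

#include <assert.h>
#include <stdint.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	/* GFP_DMA32 keeps the page below 4 GiB; the address is made up */
	uint64_t msi_target = 0x7ffd4000ull;

	assert(upper_32_bits(msi_target) == 0);		  /* PCIE_MSI_ADDR_HI */
	assert(lower_32_bits(msi_target) == 0x7ffd4000u); /* PCIE_MSI_ADDR_LO */
	return 0;
}
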
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -293,17 +404,17 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
struct pci_host_bridge *bridge;
- struct resource *cfg_res;
+ struct resource *res;
int ret;
- raw_spin_lock_init(&pci->pp.lock);
+ raw_spin_lock_init(&pp->lock);
- cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res);
- pp->cfg0_base = cfg_res->start;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (res) {
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res);
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
} else {
@@ -312,8 +423,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
if (!pci->dbi_base) {
- struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
}
@@ -350,67 +461,39 @@ int dw_pcie_host_init(struct pcie_port *pp)
of_property_read_bool(np, "msi-parent") ||
of_property_read_bool(np, "msi-map"));
- if (!pp->num_vectors) {
+ /*
+ * For the has_msi_ctrl case the default assignment is handled
+ * in the dw_pcie_msi_host_init().
+ */
+ if (!pp->has_msi_ctrl && !pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
} else if (pp->num_vectors > MAX_MSI_IRQS) {
dev_err(dev, "Invalid number of vectors\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_deinit_host;
}
if (pp->ops->msi_host_init) {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
- return ret;
+ goto err_deinit_host;
} else if (pp->has_msi_ctrl) {
- u32 ctrl, num_ctrls;
-
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- pp->irq_mask[ctrl] = ~0;
-
- if (!pp->msi_irq) {
- pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
- if (pp->msi_irq < 0) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
- }
-
- pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
-
- ret = dw_pcie_allocate_domains(pp);
- if (ret)
- return ret;
-
- if (pp->msi_irq > 0)
- irq_set_chained_handler_and_data(pp->msi_irq,
- dw_chained_msi_isr,
- pp);
-
- ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
-
- pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
- sizeof(pp->msi_msg),
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- ret = dma_mapping_error(pci->dev, pp->msi_data);
- if (ret) {
- dev_err(pci->dev, "Failed to map MSI data\n");
- pp->msi_data = 0;
- goto err_free_msi;
- }
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
}
}
+ dw_pcie_version_detect(pci);
+
dw_pcie_iatu_detect(pci);
- dw_pcie_setup_rc(pp);
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_free_msi;
- if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
- ret = pci->ops->start_link(pci);
+ if (!dw_pcie_link_up(pci)) {
+ ret = dw_pcie_start_link(pci);
if (ret)
goto err_free_msi;
}
@@ -421,32 +504,50 @@ int dw_pcie_host_init(struct pcie_port *pp)
bridge->sysdata = pp;
ret = pci_host_probe(bridge);
- if (!ret)
- return 0;
+ if (ret)
+ goto err_stop_link;
+
+ return 0;
+
+err_stop_link:
+ dw_pcie_stop_link(pci);
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+err_deinit_host:
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
-void dw_pcie_host_deinit(struct pcie_port *pp)
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
+
+ dw_pcie_stop_link(pci);
+
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- int type;
- u32 busdev;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int type, ret;
+ u32 busdev;
/*
* Checking whether the link is up here is a last line of defense
@@ -467,8 +568,10 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
-
- dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
+ ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
+ pp->cfg0_size);
+ if (ret)
+ return NULL;
return pp->va_cfg0_base + where;
}
@@ -476,33 +579,45 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
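
Both config accessors now return real PCIBIOS_* codes instead of whatever the iATU reprogramming happened to return: the generic access result is checked first, and a failed restore of the shared CFG/IO window maps to PCIBIOS_SET_FAILED. A standalone model of that control flow; the numeric codes follow the conventional PCI BIOS values but are illustrative here:

#include <assert.h>

#define PCIBIOS_SUCCESSFUL 0x00
#define PCIBIOS_SET_FAILED 0x88	/* illustrative value */

static int restore_io_window(int fail)
{
	return fail ? -1 : 0;	/* stands in for dw_pcie_prog_outbound_atu() */
}

static int rd_other_conf(int generic_ret, int io_shared, int atu_fails)
{
	if (generic_ret != PCIBIOS_SUCCESSFUL)
		return generic_ret;	/* propagate the access failure as-is */
	if (io_shared && restore_io_window(atu_fails))
		return PCIBIOS_SET_FAILED;
	return PCIBIOS_SUCCESSFUL;
}

int main(void)
{
	assert(rd_other_conf(PCIBIOS_SUCCESSFUL, 1, 0) == PCIBIOS_SUCCESSFUL);
	assert(rd_other_conf(PCIBIOS_SUCCESSFUL, 1, 1) == PCIBIOS_SET_FAILED);
	return 0;
}
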
static struct pci_ops dw_child_pcie_ops = {
@@ -513,7 +628,7 @@ static struct pci_ops dw_child_pcie_ops = {
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
if (PCI_SLOT(devfn) > 0)
@@ -529,11 +644,72 @@ static struct pci_ops dw_pcie_ops = {
.write = pci_generic_config_write,
};
-void dw_pcie_setup_rc(struct pcie_port *pp)
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
- int i;
- u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *entry;
+ int i, ret;
+
+ /* Note the very first outbound ATU is used for CFG IOs */
+ if (!pci->num_ob_windows) {
+ dev_err(pci->dev, "No outbound iATU found\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure all outbound windows are disabled before proceeding with
+ * the MEM/IO ranges setups.
+ */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++i)
+ break;
+
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set MEM range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++i) {
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
+ pp->io_base,
+ pp->io_bus_addr,
+ pp->io_size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to set IO range %pr\n",
+ entry->res);
+ return ret;
+ }
+ } else {
+ pp->cfg0_io_shared = true;
+ }
+ }
+
+ if (pci->num_ob_windows <= i)
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+ pci->num_ob_windows);
+
+ return 0;
+}
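
dw_pcie_iatu_setup() budgets the outbound windows: window 0 stays reserved for config cycles, MEM ranges claim the following windows, and if none is left for IO the driver falls back to time-sharing window 0 (cfg0_io_shared). A small standalone model of that allocation, with made-up window and range counts:

#include <stdio.h>

int main(void)
{
	int num_ob_windows = 2;		/* window 0 is reserved for CFG */
	int num_mem_ranges = 1, have_io = 1;
	int i = 0, cfg0_io_shared = 0;

	for (int r = 0; r < num_mem_ranges && num_ob_windows > ++i; r++)
		printf("MEM range %d -> outbound window %d\n", r, i);

	if (have_io) {
		if (num_ob_windows > ++i)
			printf("IO range -> outbound window %d\n", i);
		else
			cfg0_io_shared = 1;	/* reprogram window 0 around IO */
	}

	printf("cfg0_io_shared = %d\n", cfg0_io_shared);
	return 0;
}
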
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val, ctrl, num_ctrls;
+ int ret;
/*
* Enable DBI read-only registers for writing/updating configuration.
@@ -582,45 +758,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
- /* Ensure all outbound windows are disabled so there are multiple matches */
- for (i = 0; i < pci->num_ob_windows; i++)
- dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
-
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
* ATU, so we should not program the ATU here.
*/
if (pp->bridge->child_ops == &dw_child_pcie_ops) {
- int atu_idx = 0;
- struct resource_entry *entry;
-
- /* Get last memory resource entry */
- resource_list_for_each_entry(entry, &pp->bridge->windows) {
- if (resource_type(entry->res) != IORESOURCE_MEM)
- continue;
-
- if (pci->num_ob_windows <= ++atu_idx)
- break;
-
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_MEM, entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
- }
-
- if (pp->io_size) {
- if (pci->num_ob_windows > ++atu_idx)
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
- else
- pci->io_cfg_atu_shared = true;
- }
-
- if (pci->num_ob_windows <= atu_idx)
- dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
- pci->num_ob_windows);
+ ret = dw_pcie_iatu_setup(pp);
+ if (ret)
+ return ret;
}
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
@@ -633,5 +779,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 0c5de87d3cc6..1fcfb840f238 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -17,13 +17,11 @@
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/types.h>
-#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
struct dw_pcie *pci;
- struct regmap *regmap;
enum dw_pcie_device_mode mode;
};
@@ -31,20 +29,9 @@ struct dw_plat_pcie_of_data {
enum dw_pcie_device_mode mode;
};
-static const struct of_device_id dw_plat_pcie_of_match[];
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
};
-static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = dw_plat_pcie_establish_link,
-};
-
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -96,7 +83,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = dw_plat_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -140,7 +127,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
@@ -153,20 +139,21 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENODEV;
ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
break;
case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ ret = -EINVAL;
+ break;
}
- return 0;
+ return ret;
}
static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index d92c8a25094f..c6725c519a47 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -8,14 +8,41 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include "../../pci.h"
#include "pcie-designware.h"
+void dw_pcie_version_detect(struct dw_pcie *pci)
+{
+ u32 ver;
+
+ /* The content of the CSR is zero on DWC PCIe older than v4.70a */
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
+ if (!ver)
+ return;
+
+ if (pci->version && pci->version != ver)
+ dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
+ pci->version, ver);
+ else
+ pci->version = ver;
+
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
+
+ if (pci->type && pci->type != ver)
+ dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
+ pci->type, ver);
+ else
+ pci->type = ver;
+}
+
/*
* These interfaces resemble the pci_find_*capability() interfaces, but these
* are for configuring host controllers, which are bridges *to* PCI devices but
@@ -181,48 +208,61 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
dev_err(pci->dev, "write DBI address failed\n");
}
-static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
+ u32 index)
{
+ if (pci->iatu_unroll_enabled)
+ return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
+ return pci->atu_base;
+}
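
dw_pcie_select_atu() hides the two register layouts: unrolled controllers map every (direction, index) pair at a fixed offset, while older ones expose a single window selected through PCIE_ATU_VIEWPORT. Assuming the unrolled layout packs 512 bytes per region pair with the inbound half at +256 (an assumption of this sketch), the offset math looks like this:

#include <assert.h>
#include <stdint.h>

#define REGION_DIR_OB 0u
#define REGION_DIR_IB 1u
/* assumed unrolled layout: 512 B per region pair, inbound half at +256 */
#define UNROLL_BASE(dir, index) (((uint32_t)(index) << 9) | ((dir) << 8))

int main(void)
{
	assert(UNROLL_BASE(REGION_DIR_OB, 0) == 0x000);
	assert(UNROLL_BASE(REGION_DIR_IB, 0) == 0x100);
	assert(UNROLL_BASE(REGION_DIR_IB, 2) == 0x500);
	return 0;
}
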
+
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
+{
+ void __iomem *base;
int ret;
u32 val;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
+ return pci->ops->read_dbi(pci, base, reg, 4);
- ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
+ ret = dw_pcie_read(base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read ATU address failed\n");
return val;
}
-static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+ u32 reg, u32 val)
{
+ void __iomem *base;
int ret;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
+ pci->ops->write_dbi(pci, base, reg, 4, val);
return;
}
- ret = dw_pcie_write(pci->atu_base + reg, 4, val);
+ ret = dw_pcie_write(base + reg, 4, val);
if (ret)
dev_err(pci->dev, "Write ATU address failed\n");
}
-static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}
-static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}
static inline u32 dw_pcie_enable_ecrc(u32 val)
@@ -266,264 +306,160 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int type,
- u64 cpu_addr, u64 pci_addr,
- u64 size)
+static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
{
u32 retries, val;
- u64 limit_addr = cpu_addr + size - 1;
-
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
- lower_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
- upper_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = upper_32_bits(size - 1) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
- val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE);
+ u64 limit_addr;
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ob_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return;
+ if (pci->ops && pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
- mdelay(LINK_WAIT_IATU);
+ limit_addr = cpu_addr + size - 1;
+
+ if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
}
- dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
-{
- u32 retries, val;
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
- if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
- return;
- }
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
- PCIE_ATU_REGION_OUTBOUND | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- if (pci->version >= 0x460A)
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
- upper_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
+ if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
- return;
+ return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
+ return -ETIMEDOUT;
}
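
The -EINVAL path added above is easier to follow with numbers plugged in. Below is a minimal user-space sketch of the same checks; the region_align/region_limit values and the IS_ALIGNED() stand-in are illustrative, not taken from any particular core revision:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's IS_ALIGNED() helper. */
#define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

/* Illustrative capabilities: 64 KiB granularity, 1 TiB matchable range. */
static const uint64_t region_align = 0x10000;
static const uint64_t region_limit = 0xFFFFFFFFFFULL;

static bool window_ok(uint64_t cpu_addr, uint64_t pci_addr, uint64_t size)
{
	uint64_t limit_addr = cpu_addr + size - 1;

	/*
	 * Same test as __dw_pcie_prog_outbound_atu(): the limit must not
	 * carry into address bits the iATU cannot match, and both base and
	 * target must sit on the region granularity.
	 */
	if ((limit_addr & ~region_limit) != (cpu_addr & ~region_limit))
		return false;

	return IS_ALIGNED(cpu_addr, region_align) &&
	       IS_ALIGNED(pci_addr, region_align) && size;
}

int main(void)
{
	/* 1 MiB window at 64 KiB-aligned addresses: accepted. */
	printf("%d\n", window_ok(0x40000000, 0x0, 0x100000));     /* 1 */
	/* Misaligned CPU base: rejected. */
	printf("%d\n", window_ok(0x40001000, 0x0, 0x100000));     /* 0 */
	/* Window ending beyond the matchable range: rejected. */
	printf("%d\n", window_ok(0xFFFFFF0000ULL, 0x0, 0x20000)); /* 0 */
	return 0;
}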
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
{
- __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
+ return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
}
-static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
}
-static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- int type;
- u32 retries, val;
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
- return -EINVAL;
- }
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_ENABLE |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
-
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ib_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return 0;
-
- mdelay(LINK_WAIT_IATU);
- }
- dev_err(pci->dev, "Inbound iATU is not being enabled\n");
-
- return -EBUSY;
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+ int type, u64 cpu_addr, u8 bar)
{
- int type;
u32 retries, val;
- if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
- cpu_addr, as_type);
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
- index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
+ if (!IS_ALIGNED(cpu_addr, pci->region_align))
return -EINVAL;
- }
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+ PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
- return -EBUSY;
+ return -ETIMEDOUT;
}
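
For reference, the REGION_CTRL2 word written above is a plain OR of the match-mode flags with the BAR number in bits 10:8. A standalone sketch with the bit positions copied from the header hunk further down; the BAR value is only an example:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                     (1U << (n))
#define PCIE_ATU_ENABLE            BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE   BIT(30)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)

int main(void)
{
	uint8_t bar = 2;
	uint32_t ctrl2 = PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
			 PCIE_ATU_BAR_MODE_ENABLE | ((uint32_t)bar << 8);

	printf("REGION_CTRL2 for BAR %u: 0x%08x\n", bar, ctrl2); /* 0xc0080200 */
	return 0;
}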
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type)
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
- int region;
-
- switch (type) {
- case DW_PCIE_REGION_INBOUND:
- region = PCIE_ATU_REGION_INBOUND;
- break;
- case DW_PCIE_REGION_OUTBOUND:
- region = PCIE_ATU_REGION_OUTBOUND;
- break;
- default:
- return;
- }
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
+ u32 offset, val;
int retries;
/* Check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link up\n");
- return 0;
- }
+ if (dw_pcie_link_up(pci))
+ break;
+
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_info(pci->dev, "Phy link never came up\n");
+ if (retries >= LINK_WAIT_MAX_RETRIES) {
+ dev_err(pci->dev, "Phy link never came up\n");
+ return -ETIMEDOUT;
+ }
- return -ETIMEDOUT;
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
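
The new log line decodes the Link Status register with FIELD_GET(). A self-contained sketch of that decode with a local FIELD_GET() stand-in; the readback value is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Field masks as in <uapi/linux/pci_regs.h>. */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */

/* Minimal FIELD_GET() stand-in for constant masks. */
#define FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

int main(void)
{
	uint16_t lnksta = 0x1043; /* sample readback */

	printf("PCIe Gen.%u x%u link up\n",
	       (unsigned int)FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta),
	       (unsigned int)FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta)); /* Gen.3 x4 */
	return 0;
}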
@@ -534,7 +470,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
if (pci->ops && pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
@@ -586,95 +522,81 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
}
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
if (val == 0xffffffff)
- return 1;
+ return true;
- return 0;
+ return false;
}
-static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
+static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
+ int max_region, ob, ib;
+ u32 val, min, dir;
+ u64 max;
- max_region = min((int)pci->atu_size / 512, 256);
-
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
+ if (pci->iatu_unroll_enabled) {
+ max_region = min((int)pci->atu_size / 512, 256);
+ } else {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+ }
- val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
+ for (ob = 0; ob < max_region; ob++) {
+ dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
-
- val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
+ for (ib = 0; ib < max_region; ib++) {
+ dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- pci->num_ib_windows = ib;
- pci->num_ob_windows = ob;
-}
-
-static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
-{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
- max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
- break;
+ if (ob) {
+ dir = PCIE_ATU_REGION_DIR_OB;
+ } else if (ib) {
+ dir = PCIE_ATU_REGION_DIR_IB;
+ } else {
+ dev_err(pci->dev, "No iATU regions found\n");
+ return;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
- break;
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+ min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+ if (dw_pcie_ver_is_ge(pci, 460A)) {
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+ max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+ } else {
+ max = 0;
}
- pci->num_ib_windows = ib;
pci->num_ob_windows = ob;
+ pci->num_ib_windows = ib;
+ pci->region_align = 1 << fls(min);
+ pci->region_limit = (max << 32) | (SZ_4G - 1);
}
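
The tail of this function derives the two new capability fields from write/readback probes: writing 0 to PCIE_ATU_LIMIT reads back the hardwired granularity bits, and writing ~0 to PCIE_ATU_UPPER_LIMIT reads back only the implemented bits. A standalone sketch of the derivation; the readback values (64 KiB granularity, 8 implemented upper-limit bits) are illustrative:

#include <stdint.h>
#include <stdio.h>

/* fls() stand-in: 1-based index of the most significant set bit. */
static int fls32(uint32_t x)
{
	int i = 0;

	while (x) {
		i++;
		x >>= 1;
	}
	return i;
}

int main(void)
{
	uint32_t min = 0xFFFF; /* LIMIT readback after writing 0x0 */
	uint64_t max = 0xFF;   /* UPPER_LIMIT readback after writing ~0 */

	uint32_t region_align = 1U << fls32(min);
	uint64_t region_limit = (max << 32) | (0x100000000ULL - 1); /* SZ_4G - 1 */

	/* Matches the dev_info() format below: "align 64K, limit 1024G". */
	printf("align %uK, limit %lluG\n", region_align / 1024,
	       (unsigned long long)((region_limit + 1) >> 30));
	return 0;
}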
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(pci->dev);
- if (pci->version >= 0x480A || (!pci->version &&
- dw_pcie_iatu_unroll_enabled(pci))) {
- pci->iatu_unroll_enabled = true;
+ pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
+ if (pci->iatu_unroll_enabled) {
if (!pci->atu_base) {
struct resource *res =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
if (res) {
pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(dev, res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
}
if (!pci->atu_base || IS_ERR(pci->atu_base))
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
@@ -683,23 +605,25 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
if (!pci->atu_size)
/* Pick a minimal default, enough for 8 in and 8 out windows */
pci->atu_size = SZ_4K;
+ } else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+ }
- dw_pcie_iatu_detect_regions_unroll(pci);
- } else
- dw_pcie_iatu_detect_regions(pci);
+ dw_pcie_iatu_detect_regions(pci);
dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
- dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
- pci->num_ob_windows, pci->num_ib_windows);
+ dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
+ struct device_node *np = pci->dev->of_node;
u32 val;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
@@ -726,6 +650,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+ if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
@@ -772,11 +703,4 @@ void dw_pcie_setup(struct dw_pcie *pci)
break;
}
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-
- if (of_property_read_bool(np, "snps,enable-cdm-check")) {
- val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
- PCIE_PL_CHK_REG_CHK_REG_START;
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
- }
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7d6e9b7576be..09b887093a84 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -20,6 +20,29 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A 0x3336352a
+#define DW_PCIE_VER_460A 0x3436302a
+#define DW_PCIE_VER_470A 0x3437302a
+#define DW_PCIE_VER_480A 0x3438302a
+#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_520A 0x3532302a
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+ ((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
+
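
These constants are the ASCII encoding of the version string the core reports through PCIE_VERSION_NUMBER, so the >= comparison in dw_pcie_ver_is_ge() works as plain integer comparison; the trailing 0x2a ('*') appears to stand in for the letter suffix. A quick standalone check:

#include <stdint.h>
#include <stdio.h>

#define DW_PCIE_VER_460A 0x3436302a
#define DW_PCIE_VER_490A 0x3439302a

static void show(uint32_t ver)
{
	printf("0x%08x = \"%c%c%c%c\"\n", ver, (ver >> 24) & 0xff,
	       (ver >> 16) & 0xff, (ver >> 8) & 0xff, ver & 0xff);
}

int main(void)
{
	show(DW_PCIE_VER_460A); /* "460*" */
	show(DW_PCIE_VER_490A); /* "490*" */

	/* ASCII digit ordering is what makes the comparison meaningful. */
	printf("%d\n", DW_PCIE_VER_490A >= DW_PCIE_VER_460A); /* 1 */
	return 0;
}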
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
@@ -74,13 +97,34 @@
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830
+#define GEN3_RELATED_OFF 0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+#define PCIE_VERSION_NUMBER 0x8F8
+#define PCIE_VERSION_TYPE 0x8FC
+
+/*
+ * iATU inbound and outbound windows CSRs. Before IP-core v4.80a, the
+ * iATU region CSRs were only indirectly accessible by means of a
+ * dedicated viewport selector. The iATU/eDMA CSR space was redesigned
+ * in DWC PCIe v4.80a so that the viewport was unrolled into the
+ * directly accessible iATU/eDMA CSR space.
+ */
#define PCIE_ATU_VIEWPORT 0x900
-#define PCIE_ATU_REGION_INBOUND BIT(31)
-#define PCIE_ATU_REGION_OUTBOUND 0
-#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_REGION_DIR_IB BIT(31)
+#define PCIE_ATU_REGION_DIR_OB 0
+#define PCIE_ATU_VIEWPORT_BASE 0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+	(((index) << 9) | (((dir) == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
+#define PCIE_ATU_VIEWPORT_SIZE 0x2C
+#define PCIE_ATU_REGION_CTRL1 0x000
#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
@@ -88,19 +132,19 @@
#define PCIE_ATU_TYPE_CFG1 0x5
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
-#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
-#define PCIE_ATU_LOWER_BASE 0x90C
-#define PCIE_ATU_UPPER_BASE 0x910
-#define PCIE_ATU_LIMIT 0x914
-#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_LOWER_BASE 0x008
+#define PCIE_ATU_UPPER_BASE 0x00C
+#define PCIE_ATU_LIMIT 0x010
+#define PCIE_ATU_LOWER_TARGET 0x014
#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
-#define PCIE_ATU_UPPER_TARGET 0x91C
-#define PCIE_ATU_UPPER_LIMIT 0x924
+#define PCIE_ATU_UPPER_TARGET 0x018
+#define PCIE_ATU_UPPER_LIMIT 0x020
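
Each unrolled region occupies a 0x200-byte stride, with the inbound half at +0x100 (BIT(8)); the register offsets above are relative to that per-region base. A standalone sketch of the offset arithmetic using the macros from this hunk:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

#define PCIE_ATU_REGION_DIR_IB BIT(31)
#define PCIE_ATU_REGION_DIR_OB 0
#define PCIE_ATU_UNROLL_BASE(dir, index) \
	(((index) << 9) | (((dir) == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
#define PCIE_ATU_LOWER_BASE 0x008

int main(void)
{
	/* OB region 0 at 0x000, IB region 0 at 0x100, OB region 1 at 0x200. */
	printf("0x%03x\n", PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, 0));
	printf("0x%03x\n", PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_IB, 0));
	printf("0x%03x\n", PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, 1));

	/* LOWER_BASE of outbound region 1 sits at 0x208 in unrolled space. */
	printf("0x%03x\n", PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, 1) +
			   PCIE_ATU_LOWER_BASE);
	return 0;
}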
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
@@ -131,6 +175,25 @@
#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
/*
+ * RAS-DES register definitions
+ */
+#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8
+#define EVENT_COUNTER_ALL_CLEAR 0x3
+#define EVENT_COUNTER_ENABLE_ALL 0x7
+#define EVENT_COUNTER_ENABLE_SHIFT 2
+#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
+#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
+#define EVENT_COUNTER_EVENT_L1 0x5
+#define EVENT_COUNTER_EVENT_L1_1 0x7
+#define EVENT_COUNTER_EVENT_L1_2 0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
+#define EVENT_COUNTER_GROUP_5 0x5
+
+#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+
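
The shifts above compose a single control word in EVENT_COUNTER_CONTROL. A sketch of how a caller might select an event and enable the counters, assuming only that the fields sit at the *_SHIFT positions defined here; the actual register read/write is omitted:

#include <stdint.h>
#include <stdio.h>

#define EVENT_COUNTER_ENABLE_ALL      0x7
#define EVENT_COUNTER_ENABLE_SHIFT    2
#define EVENT_COUNTER_EVENT_SEL_MASK  0xffU /* GENMASK(7, 0) */
#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
#define EVENT_COUNTER_EVENT_L1        0x5
#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
#define EVENT_COUNTER_GROUP_5         0x5

int main(void)
{
	uint32_t val = 0; /* previous EVENT_COUNTER_CONTROL contents */

	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= EVENT_COUNTER_EVENT_L1 << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;

	printf("counter control: 0x%08x\n", val); /* 0x0505001c */
	return 0;
}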
+/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
* default; the driver core automatically derives atu_base from dbi_base using
@@ -138,13 +201,6 @@
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
-/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
- ((region) << 9)
-
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- (((region) << 9) | BIT(8))
-
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -155,16 +211,10 @@
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
-struct pcie_port;
struct dw_pcie;
+struct dw_pcie_rp;
struct dw_pcie_ep;
-enum dw_pcie_region_type {
- DW_PCIE_REGION_UNKNOWN,
- DW_PCIE_REGION_INBOUND,
- DW_PCIE_REGION_OUTBOUND,
-};
-
enum dw_pcie_device_mode {
DW_PCIE_UNKNOWN_TYPE,
DW_PCIE_EP_TYPE,
@@ -173,12 +223,14 @@ enum dw_pcie_device_mode {
};
struct dw_pcie_host_ops {
- int (*host_init)(struct pcie_port *pp);
- int (*msi_host_init)(struct pcie_port *pp);
+ int (*host_init)(struct dw_pcie_rp *pp);
+ void (*host_deinit)(struct dw_pcie_rp *pp);
+ int (*msi_host_init)(struct dw_pcie_rp *pp);
};
-struct pcie_port {
+struct dw_pcie_rp {
bool has_msi_ctrl:1;
+ bool cfg0_io_shared:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
@@ -187,11 +239,11 @@ struct pcie_port {
u32 io_size;
int irq;
const struct dw_pcie_host_ops *ops;
- int msi_irq;
+ int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
- u16 msi_msg;
dma_addr_t msi_data;
+ struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
@@ -200,12 +252,6 @@ struct pcie_port {
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
-enum dw_pcie_as_type {
- DW_PCIE_AS_UNKNOWN,
- DW_PCIE_AS_MEM,
- DW_PCIE_AS_IO,
-};
-
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
@@ -261,20 +307,21 @@ struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
void __iomem *dbi_base2;
- /* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
size_t atu_size;
u32 num_ib_windows;
u32 num_ob_windows;
- struct pcie_port pp;
+ u32 region_align;
+ u64 region_limit;
+ struct dw_pcie_rp pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
- unsigned int version;
+ u32 version;
+ u32 type;
int num_lanes;
int link_gen;
u8 n_fts[2];
bool iatu_unroll_enabled: 1;
- bool io_cfg_atu_shared: 1;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -282,6 +329,8 @@ struct dw_pcie {
#define to_dw_pcie_from_ep(endpoint) \
container_of((endpoint), struct dw_pcie, ep)
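
Throughout the drivers below, host callbacks receive only the dw_pcie_rp and climb back to the owning dw_pcie through this macro. A minimal user-space sketch of the container_of() pattern with stand-in types:

#include <stddef.h>
#include <stdio.h>

/* container_of() stand-in: recover the outer struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dw_pcie_rp { int irq; };
struct dw_pcie { int num_lanes; struct dw_pcie_rp pp; };

#define to_dw_pcie_from_pp(port) container_of(port, struct dw_pcie, pp)

int main(void)
{
	struct dw_pcie pci = { .num_lanes = 4 };
	struct dw_pcie_rp *pp = &pci.pp;

	/* A callback handed only 'pp' can still reach the whole device. */
	printf("%d\n", to_dw_pcie_from_pp(pp)->num_lanes); /* 4 */
	return 0;
}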
+void dw_pcie_version_detect(struct dw_pcie *pci);
+
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
@@ -294,17 +343,13 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type);
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type);
+ int type, u64 cpu_addr, u8 bar);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
@@ -365,34 +410,49 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, reg, val);
}
+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->start_link)
+ return pci->ops->start_link(pci);
+
+ return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
+}
+
#ifdef CONFIG_PCIE_DW_HOST
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_setup_rc(struct pcie_port *pp);
-int dw_pcie_host_init(struct pcie_port *pp);
-void dw_pcie_host_deinit(struct pcie_port *pp);
-int dw_pcie_allocate_domains(struct pcie_port *pp);
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
-static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
}
-static inline void dw_pcie_setup_rc(struct pcie_port *pp)
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
+ return 0;
}
-static inline int dw_pcie_host_init(struct pcie_port *pp)
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
return 0;
}
-static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
}
-static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 8c5bb9d7cc36..c1e7653e508e 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -186,7 +186,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int rockchip_pcie_host_init(struct pcie_port *pp)
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
@@ -288,7 +288,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
int ret;
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 02cc70d8cc06..0c90583c078b 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -16,11 +16,9 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -236,7 +234,7 @@ err:
return ret;
}
-static int fu740_pcie_host_init(struct pcie_port *pp)
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fu740_pcie *afp = to_fu740_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 410555dccb6d..e2b80f10030d 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -74,7 +74,7 @@ static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
writel(val, histb_pcie->ctrl + reg);
}
-static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -88,7 +88,7 @@ static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
}
-static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -180,7 +180,7 @@ static int histb_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int histb_pcie_host_init(struct pcie_port *pp)
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -219,7 +219,7 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)
regulator_disable(hipcie->vpcie);
}
-static int histb_pcie_host_enable(struct pcie_port *pp)
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -297,7 +297,7 @@ static int histb_pcie_probe(struct platform_device *pdev)
{
struct histb_pcie *hipcie;
struct dw_pcie *pci;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
enum of_gpio_flags of_flags;
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 5ba144924ff8..333c33d98a70 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -58,10 +58,6 @@
#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
-struct intel_pcie_soc {
- unsigned int pcie_ver;
-};
-
struct intel_pcie {
struct dw_pcie pci;
void __iomem *app_base;
@@ -306,7 +302,11 @@ static int intel_pcie_host_setup(struct intel_pcie *pcie)
intel_pcie_ltssm_disable(pcie);
intel_pcie_link_setup(pcie);
intel_pcie_init_n_fts(pci);
- dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_setup_rc(&pci->pp);
+ if (ret)
+ goto app_init_err;
+
dw_pcie_upconfig_setup(pci);
intel_pcie_device_rst_deassert(pcie);
@@ -343,7 +343,7 @@ static void __intel_pcie_remove(struct intel_pcie *pcie)
static int intel_pcie_remove(struct platform_device *pdev)
{
struct intel_pcie *pcie = platform_get_drvdata(pdev);
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
dw_pcie_host_deinit(pp);
__intel_pcie_remove(pcie);
@@ -351,7 +351,7 @@ static int intel_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
+static int intel_pcie_suspend_noirq(struct device *dev)
{
struct intel_pcie *pcie = dev_get_drvdata(dev);
int ret;
@@ -366,14 +366,14 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
return ret;
}
-static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
+static int intel_pcie_resume_noirq(struct device *dev)
{
struct intel_pcie *pcie = dev_get_drvdata(dev);
return intel_pcie_host_setup(pcie);
}
-static int intel_pcie_rc_init(struct pcie_port *pp)
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -394,16 +394,11 @@ static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
.host_init = intel_pcie_rc_init,
};
-static const struct intel_pcie_soc pcie_data = {
- .pcie_ver = 0x520A,
-};
-
static int intel_pcie_probe(struct platform_device *pdev)
{
- const struct intel_pcie_soc *data;
struct device *dev = &pdev->dev;
struct intel_pcie *pcie;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
int ret;
@@ -424,12 +419,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- data = device_get_match_data(dev);
- if (!data)
- return -ENODEV;
-
pci->ops = &intel_pcie_ops;
- pci->version = data->pcie_ver;
pp->ops = &intel_pcie_dw_ops;
ret = dw_pcie_host_init(pp);
@@ -442,12 +432,12 @@ static int intel_pcie_probe(struct platform_device *pdev)
}
static const struct dev_pm_ops intel_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
- intel_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
};
static const struct of_device_id of_intel_pcie_match[] = {
- { .compatible = "intel,lgm-pcie", .data = &pcie_data },
+ { .compatible = "intel,lgm-pcie" },
{}
};
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 1ac29a6eef22..f90f36bac018 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -231,7 +231,7 @@ static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 val, mask, status;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
/*
* Keem Bay PCIe Controller provides an additional IP logic on top of
@@ -332,13 +332,13 @@ static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
u32 val;
int ret;
pp->ops = &keembay_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = keembay_pcie_setup_msi_irq(pcie);
if (ret)
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index a52cad269f85..7f67aad71df4 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -620,7 +620,7 @@ static int kirin_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
{
pp->bridge->ops = &kirin_pci_ops;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 2ea13750b492..66886dc6e777 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -41,6 +41,9 @@
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)
+#define PCIE20_PARF_PM_CTRL 0x20
+#define REQ_NOT_ENTR_L1 BIT(5)
+
#define PCIE20_PARF_PHY_CTRL 0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
@@ -52,6 +55,10 @@
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define AHB_CLK_EN BIT(0)
+#define MSTR_AXI_CLK_EN BIT(1)
+#define BYPASS BIT(4)
+
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
#define PCIE20_PARF_LTSSM 0x1B0
@@ -69,7 +76,20 @@
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE_CAP_LINK1_VAL 0x2FD7F
+#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
+ 250)
+#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
+ 1)
+#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
+ PCI_EXP_SLTCAP_PCP | \
+ PCI_EXP_SLTCAP_MRLSP | \
+ PCI_EXP_SLTCAP_AIP | \
+ PCI_EXP_SLTCAP_PIP | \
+ PCI_EXP_SLTCAP_HPS | \
+ PCI_EXP_SLTCAP_HPC | \
+ PCI_EXP_SLTCAP_EIP | \
+ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
+ PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
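
A quick way to see that this is a pure refactor: OR-ing the new symbolic fields reproduces the old PCIE_CAP_LINK1_VAL magic number removed above. Standalone check, with mask values copied from <uapi/linux/pci_regs.h> and a local FIELD_PREP() stand-in:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_SLTCAP_ABP   0x00000001
#define PCI_EXP_SLTCAP_PCP   0x00000002
#define PCI_EXP_SLTCAP_MRLSP 0x00000004
#define PCI_EXP_SLTCAP_AIP   0x00000008
#define PCI_EXP_SLTCAP_PIP   0x00000010
#define PCI_EXP_SLTCAP_HPS   0x00000020
#define PCI_EXP_SLTCAP_HPC   0x00000040
#define PCI_EXP_SLTCAP_SPLV  0x00007f80
#define PCI_EXP_SLTCAP_SPLS  0x00018000
#define PCI_EXP_SLTCAP_EIP   0x00020000

/* FIELD_PREP() stand-in for constant masks. */
#define FIELD_PREP(mask, val) (((uint32_t)(val) * ((mask) & -(mask))) & (mask))

int main(void)
{
	uint32_t val = PCI_EXP_SLTCAP_ABP | PCI_EXP_SLTCAP_PCP |
		       PCI_EXP_SLTCAP_MRLSP | PCI_EXP_SLTCAP_AIP |
		       PCI_EXP_SLTCAP_PIP | PCI_EXP_SLTCAP_HPS |
		       PCI_EXP_SLTCAP_HPC | PCI_EXP_SLTCAP_EIP |
		       FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250) | /* 250 x 0.1 W */
		       FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1);    /* 0.1x scale */

	printf("0x%05x\n", val); /* 0x2fd7f == old PCIE_CAP_LINK1_VAL */
	return 0;
}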
#define PCIE20_PARF_Q2A_FLUSH 0x1AC
@@ -128,7 +148,6 @@ struct qcom_pcie_resources_2_3_2 {
struct clk *master_clk;
struct clk *slave_clk;
struct clk *cfg_clk;
- struct clk *pipe_clk;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
@@ -165,10 +184,11 @@ struct qcom_pcie_resources_2_7_0 {
int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
- struct clk *pipe_clk;
- struct clk *pipe_clk_src;
- struct clk *phy_pipe_clk;
- struct clk *ref_clk_src;
+};
+
+struct qcom_pcie_resources_2_9_0 {
+ struct clk_bulk_data clks[5];
+ struct reset_control *rst;
};
union qcom_pcie_resources {
@@ -178,6 +198,7 @@ union qcom_pcie_resources {
struct qcom_pcie_resources_2_3_3 v2_3_3;
struct qcom_pcie_resources_2_4_0 v2_4_0;
struct qcom_pcie_resources_2_7_0 v2_7_0;
+ struct qcom_pcie_resources_2_9_0 v2_9_0;
};
struct qcom_pcie;
@@ -194,7 +215,6 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
- unsigned int pipe_clk_need_muxing:1;
unsigned int has_tbu_clk:1;
unsigned int has_ddrss_sf_tbu_clk:1;
unsigned int has_aggre0_clk:1;
@@ -325,8 +345,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- struct device_node *node = dev->of_node;
- u32 val;
int ret;
/* reset the PCIe interface as uboot can leave it undefined state */
@@ -337,8 +355,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
reset_control_assert(res->ext_reset);
reset_control_assert(res->phy_reset);
- writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
-
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
@@ -381,15 +397,42 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
goto err_deassert_axi;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
- if (ret)
- goto err_clks;
+ return 0;
+
+err_deassert_axi:
+ reset_control_assert(res->por_reset);
+err_deassert_por:
+ reset_control_assert(res->pci_reset);
+err_deassert_pci:
+ reset_control_assert(res->phy_reset);
+err_deassert_phy:
+ reset_control_assert(res->ext_reset);
+err_deassert_ext:
+ reset_control_assert(res->ahb_reset);
+err_deassert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ u32 val;
+ int ret;
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret)
+ return ret;
+
if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
@@ -428,23 +471,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
return 0;
-
-err_clks:
- reset_control_assert(res->axi_reset);
-err_deassert_axi:
- reset_control_assert(res->por_reset);
-err_deassert_por:
- reset_control_assert(res->pci_reset);
-err_deassert_pci:
- reset_control_assert(res->phy_reset);
-err_deassert_phy:
- reset_control_assert(res->ext_reset);
-err_deassert_ext:
- reset_control_assert(res->ahb_reset);
-err_deassert_ahb:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
}
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
@@ -532,16 +558,6 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
goto err_slave;
}
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
- }
-
return 0;
err_slave:
clk_disable_unprepare(res->slave_bus);
@@ -557,6 +573,21 @@ err_res:
return ret;
}
+static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
+{
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+}
+
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -597,8 +628,7 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (IS_ERR(res->slave_clk))
return PTR_ERR(res->slave_clk);
- res->pipe_clk = devm_clk_get(dev, "pipe");
- return PTR_ERR_OR_ZERO(res->pipe_clk);
+ return 0;
}
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
@@ -613,19 +643,11 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
-static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
-
- clk_disable_unprepare(res->pipe_clk);
-}
-
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -658,6 +680,25 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
goto err_slave_clk;
}
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ u32 val;
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
@@ -680,34 +721,6 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
-
-err_slave_clk:
- clk_disable_unprepare(res->master_clk);
-err_master_clk:
- clk_disable_unprepare(res->cfg_clk);
-err_cfg_clk:
- clk_disable_unprepare(res->aux_clk);
-
-err_aux_clk:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
-}
-
-static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- int ret;
-
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- return ret;
- }
-
- return 0;
}
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
@@ -814,7 +827,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
ret = reset_control_assert(res->axi_m_reset);
@@ -939,6 +951,33 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
if (ret)
goto err_clks;
+ return 0;
+
+err_clks:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
+{
+ u32 val;
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
@@ -961,26 +1000,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
-
-err_clks:
- reset_control_assert(res->ahb_reset);
-err_rst_ahb:
- reset_control_assert(res->pwr_reset);
-err_rst_pwr:
- reset_control_assert(res->axi_s_reset);
-err_rst_axi_s:
- reset_control_assert(res->axi_m_sticky_reset);
-err_rst_axi_m_sticky:
- reset_control_assert(res->axi_m_reset);
-err_rst_axi_m:
- reset_control_assert(res->pipe_sticky_reset);
-err_rst_pipe_sticky:
- reset_control_assert(res->pipe_reset);
-err_rst_pipe:
- reset_control_assert(res->phy_reset);
-err_rst_phy:
- reset_control_assert(res->phy_ahb_reset);
- return ret;
}
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
@@ -1038,9 +1057,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int i, ret;
- u32 val;
for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
ret = reset_control_assert(res->rst[i]);
@@ -1097,6 +1114,33 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
goto err_clk_aux;
}
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+	/*
+	 * Failures are deliberately not checked here; the original
+	 * failure is returned in 'ret' anyway.
+	 */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
writel(SLV_ADDR_SPACE_SZ,
pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
@@ -1114,7 +1158,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
- writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_ASPMS;
@@ -1124,24 +1168,6 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
PCI_EXP_DEVCTL2);
return 0;
-
-err_clk_aux:
- clk_disable_unprepare(res->ahb_clk);
-err_clk_ahb:
- clk_disable_unprepare(res->axi_s_clk);
-err_clk_axi_s:
- clk_disable_unprepare(res->axi_m_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->iface);
-err_clk_iface:
- /*
- * Not checking for failure, will anyway return
- * the original failure in 'ret'.
- */
- for (i = 0; i < ARRAY_SIZE(res->rst); i++)
- reset_control_assert(res->rst[i]);
-
- return ret;
}
static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
@@ -1184,22 +1210,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret < 0)
return ret;
- if (pcie->cfg->pipe_clk_need_muxing) {
- res->pipe_clk_src = devm_clk_get(dev, "pipe_mux");
- if (IS_ERR(res->pipe_clk_src))
- return PTR_ERR(res->pipe_clk_src);
-
- res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe");
- if (IS_ERR(res->phy_pipe_clk))
- return PTR_ERR(res->phy_pipe_clk);
-
- res->ref_clk_src = devm_clk_get(dev, "ref");
- if (IS_ERR(res->ref_clk_src))
- return PTR_ERR(res->ref_clk_src);
- }
-
- res->pipe_clk = devm_clk_get(dev, "pipe");
- return PTR_ERR_OR_ZERO(res->pipe_clk);
+ return 0;
}
static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
@@ -1216,10 +1227,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
return ret;
}
- /* Set TCXO as clock source for pcie_pipe_clk_src */
- if (pcie->cfg->pipe_clk_need_muxing)
- clk_set_parent(res->pipe_clk_src, res->ref_clk_src);
-
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret < 0)
goto err_disable_regulators;
@@ -1261,6 +1268,11 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val |= BIT(4);
writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ /* Enable L1 and L1SS */
+ val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
+ val &= ~REQ_NOT_ENTR_L1;
+ writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
+
if (IS_ENABLED(CONFIG_PCI_MSI)) {
val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
val |= BIT(31);
@@ -1281,25 +1293,114 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
clk_bulk_disable_unprepare(res->num_clks, res->clks);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
-static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
- /* Set pipe clock as clock source for pcie_pipe_clk_src */
- if (pcie->cfg->pipe_clk_need_muxing)
- clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk);
+ res->clks[0].id = "iface";
+ res->clks[1].id = "axi_m";
+ res->clks[2].id = "axi_s";
+ res->clks[3].id = "axi_bridge";
+ res->clks[4].id = "rchng";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
- return clk_prepare_enable(res->pipe_clk);
+ return 0;
}
-static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+	 * Delay periods before and after the reset deassert are working
+	 * values taken from the downstream Codeaurora kernel.
+ */
+ usleep_range(2000, 2500);
+
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(2000, 2500);
- clk_disable_unprepare(res->pipe_clk);
+ return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int i;
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+ writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+ pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
+ GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
+ pci->dbi_base + GEN3_RELATED_OFF);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
+
+ for (i = 0; i < 256; i++)
+ writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));
+
+ return 0;
}
static int qcom_pcie_link_up(struct dw_pcie *pci)
@@ -1381,7 +1482,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_host_init(struct pcie_port *pp)
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1433,6 +1534,7 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
static const struct qcom_pcie_ops ops_2_1_0 = {
.get_resources = qcom_pcie_get_resources_2_1_0,
.init = qcom_pcie_init_2_1_0,
+ .post_init = qcom_pcie_post_init_2_1_0,
.deinit = qcom_pcie_deinit_2_1_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1441,6 +1543,7 @@ static const struct qcom_pcie_ops ops_2_1_0 = {
static const struct qcom_pcie_ops ops_1_0_0 = {
.get_resources = qcom_pcie_get_resources_1_0_0,
.init = qcom_pcie_init_1_0_0,
+ .post_init = qcom_pcie_post_init_1_0_0,
.deinit = qcom_pcie_deinit_1_0_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1451,7 +1554,6 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
.init = qcom_pcie_init_2_3_2,
.post_init = qcom_pcie_post_init_2_3_2,
.deinit = qcom_pcie_deinit_2_3_2,
- .post_deinit = qcom_pcie_post_deinit_2_3_2,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1459,6 +1561,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
static const struct qcom_pcie_ops ops_2_4_0 = {
.get_resources = qcom_pcie_get_resources_2_4_0,
.init = qcom_pcie_init_2_4_0,
+ .post_init = qcom_pcie_post_init_2_4_0,
.deinit = qcom_pcie_deinit_2_4_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1467,6 +1570,7 @@ static const struct qcom_pcie_ops ops_2_4_0 = {
static const struct qcom_pcie_ops ops_2_3_3 = {
.get_resources = qcom_pcie_get_resources_2_3_3,
.init = qcom_pcie_init_2_3_3,
+ .post_init = qcom_pcie_post_init_2_3_3,
.deinit = qcom_pcie_deinit_2_3_3,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1477,8 +1581,6 @@ static const struct qcom_pcie_ops ops_2_7_0 = {
.init = qcom_pcie_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
- .post_init = qcom_pcie_post_init_2_7_0,
- .post_deinit = qcom_pcie_post_deinit_2_7_0,
};
/* Qcom IP rev.: 1.9.0 */
@@ -1487,11 +1589,18 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.init = qcom_pcie_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
- .post_init = qcom_pcie_post_init_2_7_0,
- .post_deinit = qcom_pcie_post_deinit_2_7_0,
.config_sid = qcom_pcie_config_sid_sm8250,
};
+/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
+static const struct qcom_pcie_ops ops_2_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_9_0,
+ .init = qcom_pcie_init_2_9_0,
+ .post_init = qcom_pcie_post_init_2_9_0,
+ .deinit = qcom_pcie_deinit_2_9_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
static const struct qcom_pcie_cfg apq8084_cfg = {
.ops = &ops_1_0_0,
};
@@ -1533,7 +1642,6 @@ static const struct qcom_pcie_cfg sm8250_cfg = {
static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
.ops = &ops_1_9_0,
.has_ddrss_sf_tbu_clk = true,
- .pipe_clk_need_muxing = true,
.has_aggre0_clk = true,
.has_aggre1_clk = true,
};
@@ -1541,14 +1649,12 @@ static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
.ops = &ops_1_9_0,
.has_ddrss_sf_tbu_clk = true,
- .pipe_clk_need_muxing = true,
.has_aggre1_clk = true,
};
static const struct qcom_pcie_cfg sc7280_cfg = {
.ops = &ops_1_9_0,
.has_tbu_clk = true,
- .pipe_clk_need_muxing = true,
};
static const struct qcom_pcie_cfg sc8180x_cfg = {
@@ -1556,6 +1662,10 @@ static const struct qcom_pcie_cfg sc8180x_cfg = {
.has_tbu_clk = true,
};
+static const struct qcom_pcie_cfg ipq6018_cfg = {
+ .ops = &ops_2_9_0,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1564,7 +1674,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct qcom_pcie *pcie;
const struct qcom_pcie_cfg *pcie_cfg;
@@ -1666,6 +1776,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
+ { .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 1569e82b5568..99d47ae80331 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -85,7 +85,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
struct spear13xx_pcie *spear13xx_pcie = arg;
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
unsigned int status;
status = readl(&app_reg->int_sts);
@@ -121,7 +121,7 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int spear13xx_pcie_host_init(struct pcie_port *pp)
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
@@ -155,7 +155,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -172,7 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -258,7 +258,7 @@ static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
.driver = {
.name = "spear-pcie",
- .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ .of_match_table = spear13xx_pcie_of_match,
.suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
index c2de6ed4d86f..55f61914a986 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -39,7 +39,8 @@ static int tegra194_acpi_init(struct pci_config_window *cfg)
static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
u32 val, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+ u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+ PCIE_ATU_VIEWPORT_BASE;
writel(val, pcie_ecam->iatu_base + offset + reg);
}
@@ -58,8 +59,8 @@ static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
PCIE_ATU_LIMIT);
atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
PCIE_ATU_UPPER_TARGET);
- atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
- atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
}
static void __iomem *tegra194_map_bus(struct pci_bus *bus,
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cc2678490162..1b6b437823d2 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * PCIe host controller driver for Tegra194 SoC
+ * PCIe host controller driver for the following SoCs
+ * Tegra194
+ * Tegra234
*
- * Copyright (C) 2019 NVIDIA Corporation.
+ * Copyright (C) 2019-2022 NVIDIA Corporation.
*
* Author: Vidya Sagar <vidyas@nvidia.com>
*/
@@ -35,6 +37,9 @@
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"
+#define TEGRA194_DWC_IP_VER 0x490A
+#define TEGRA234_DWC_IP_VER 0x562A
+
#define APPL_PINMUX 0x0
#define APPL_PINMUX_PEX_RST BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
@@ -49,6 +54,7 @@
#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2
#define APPL_INTR_EN_L0_0 0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
@@ -170,19 +176,6 @@
#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
-#define EVENT_COUNTER_ALL_CLEAR 0x3
-#define EVENT_COUNTER_ENABLE_ALL 0x7
-#define EVENT_COUNTER_ENABLE_SHIFT 2
-#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
-#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
-#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
-#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
-#define EVENT_COUNTER_EVENT_L1 0x5
-#define EVENT_COUNTER_EVENT_L1_1 0x7
-#define EVENT_COUNTER_EVENT_L1_2 0x8
-#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
-#define EVENT_COUNTER_GROUP_5 0x5
-
#define N_FTS_VAL 52
#define FTS_VAL 52
@@ -191,12 +184,6 @@
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-#define GEN3_RELATED_OFF 0x890
-#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
-#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
@@ -243,7 +230,19 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-struct tegra194_pcie {
+struct tegra_pcie_dw_of_data {
+ u32 version;
+ enum dw_pcie_device_mode mode;
+ bool has_msix_doorbell_access_fix;
+ bool has_sbr_reset_fix;
+ bool has_l1ss_exit_fix;
+ bool has_ltr_req_fix;
+ u32 cdm_chk_int_en_bit;
+ u32 gen4_preset_vec;
+ u8 n_fts[2];
+};
+
+struct tegra_pcie_dw {
struct device *dev;
struct resource *appl_res;
struct resource *dbi_res;
@@ -255,17 +254,20 @@ struct tegra194_pcie {
struct dw_pcie pci;
struct tegra_bpmp *bpmp;
- enum dw_pcie_device_mode mode;
+ struct tegra_pcie_dw_of_data *of_data;
bool supports_clkreq;
bool enable_cdm_check;
+ bool enable_srns;
bool link_state;
bool update_fc_fixup;
+ bool enable_ext_refclk;
u8 init_link_width;
u32 msi_ctrl_int;
u32 num_lanes;
u32 cid;
u32 cfg_link_cap_l1sub;
+ u32 ras_des_cap;
u32 pcie_cap_base;
u32 aspm_cmrt;
u32 aspm_pwr_on_t;
@@ -287,22 +289,18 @@ struct tegra194_pcie {
int ep_state;
};
-struct tegra194_pcie_of_data {
- enum dw_pcie_device_mode mode;
-};
-
-static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
- return container_of(pci, struct tegra194_pcie, pci);
+ return container_of(pci, struct tegra_pcie_dw, pci);
}
-static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value,
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
const u32 reg)
{
writel_relaxed(value, pcie->appl_base + reg);
}
-static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg)
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
return readl_relaxed(pcie->appl_base + reg);
}
@@ -311,10 +309,10 @@ struct tegra_pcie_soc {
enum dw_pcie_device_mode mode;
};
-static void apply_bad_link_workaround(struct pcie_port *pp)
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 current_link_width;
u16 val;
@@ -347,18 +345,18 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 val, tmp;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, status_l0, status_l1;
u16 val_w;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
-
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix &&
+ status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
/* SBR & Surprise Link Down WAR */
val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
@@ -374,15 +372,21 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
}
}
- if (val & APPL_INTR_STATUS_L0_INT_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
- if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
APPL_INTR_STATUS_L1_8_0);
apply_bad_link_workaround(pp);
}
- if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_w |= PCI_EXP_LNKSTA_LBMS;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA, val_w);
+
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
APPL_INTR_STATUS_L1_8_0);
@@ -394,31 +398,30 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
}
}
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
dev_info(pci->dev, "CDM check complete\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
dev_err(pci->dev, "CDM comparison mismatch\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
dev_err(pci->dev, "CDM Logic error\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
}
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
- dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
}
return IRQ_HANDLED;
}
-static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -446,7 +449,7 @@ static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
u32 val, speed;
@@ -454,6 +457,9 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
PCI_EXP_LNKSTA_CLS;
clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+
/* If EP doesn't advertise L1SS, just return */
val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
@@ -492,7 +498,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@@ -535,16 +541,21 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
/*
* This is an endpoint-mode-specific register that happens to appear
* even when the controller is operating in root port mode, and the
* system hangs when it is accessed with the link in ASPM-L1 state.
* So skip accessing it altogether
*/
- if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
*val = 0x00000000;
return PCIBIOS_SUCCESSFUL;
}
@@ -552,16 +563,21 @@ static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 val)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
/*
* This is an endpoint-mode-specific register that happens to appear
* even when the controller is operating in root port mode, and the
* system hangs when it is accessed with the link in ASPM-L1 state.
* So skip accessing it altogether
*/
- if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
return PCIBIOS_SUCCESSFUL;
return pci_generic_config_write(bus, devfn, where, size, val);
@@ -569,30 +585,12 @@ static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
static struct pci_ops tegra_pci_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
- .read = tegra194_pcie_rd_own_conf,
- .write = tegra194_pcie_wr_own_conf,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
};
#if defined(CONFIG_PCIEASPM)
-static const u32 event_cntr_ctrl_offset[] = {
- 0x1d8,
- 0x1a8,
- 0x1a8,
- 0x1a8,
- 0x1c4,
- 0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
- 0x1dc,
- 0x1ac,
- 0x1ac,
- 0x1ac,
- 0x1c8,
- 0x1dc
-};
-
-static void disable_aspm_l11(struct tegra194_pcie *pcie)
+static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -601,7 +599,7 @@ static void disable_aspm_l11(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static void disable_aspm_l12(struct tegra194_pcie *pcie)
+static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -610,24 +608,27 @@ static void disable_aspm_l12(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event)
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
u32 val;
- val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
- val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_DATA);
return val;
}
static int aspm_state_cnt(struct seq_file *s, void *data)
{
- struct tegra194_pcie *pcie = (struct tegra194_pcie *)
+ struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
dev_get_drvdata(s->private);
u32 val;
@@ -647,18 +648,20 @@ static int aspm_state_cnt(struct seq_file *s, void *data)
event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
/* Clear all counters */
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
EVENT_COUNTER_ALL_CLEAR);
/* Re-enable counting */
val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
return 0;
}
-static void init_host_aspm(struct tegra194_pcie *pcie)
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val;
@@ -666,10 +669,14 @@ static void init_host_aspm(struct tegra194_pcie *pcie)
val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+ pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
+ PCI_EXT_CAP_ID_VNDR);
+
/* Enable ASPM counters */
val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
- dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);
+ dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
/* Program T_cmrt and T_pwr_on values */
val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
@@ -686,22 +693,22 @@ static void init_host_aspm(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static void init_debugfs(struct tegra194_pcie *pcie)
+static void init_debugfs(struct tegra_pcie_dw *pcie)
{
debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
-static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; }
-static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; }
-static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
-static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
+static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
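
With the per-controller offset tables gone, every counter access above goes through the controller's own RAS DES capability. A single readout then reduces to the following (sketch; the EVENT_COUNTER_* constants are presumably now shared via the DWC header, since this series removes the local copies):

	/* Program group 5 / event Tx_L0S, then read the latched count */
	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));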
-static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
u16 val_w;
@@ -709,13 +716,15 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
- val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
- val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
- appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ }
if (pcie->enable_cdm_check) {
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
- val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
+ val |= pcie->of_data->cdm_chk_int_en_bit;
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
val = appl_readl(pcie, APPL_INTR_EN_L1_18);
@@ -736,10 +745,10 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
val_w);
}
-static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable legacy interrupt generation */
@@ -757,10 +766,10 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}
-static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable MSI interrupt generation */
@@ -770,10 +779,10 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}
-static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
/* Clear interrupt statuses before enabling interrupts */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
@@ -798,7 +807,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
tegra_pcie_enable_msi_interrupts(pp);
}
-static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val, offset, i;
@@ -842,7 +851,8 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+ val |= (pcie->of_data->gen4_preset_vec <<
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
@@ -851,11 +861,12 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static int tegra194_pcie_host_init(struct pcie_port *pp)
+static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
+ u16 val_16;
pp->bridge->ops = &tegra_pci_ops;
@@ -863,6 +874,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+ val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -887,6 +903,15 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
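
Background on the bit being cleared (not from the patch): SRNS is Separate Reference clocks with No Spread-spectrum, and the Slot Clock Configuration bit advertises that the port runs off the same reference clock the platform routes to the slot, so it must read zero when each link partner has its own REFCLK. From the uapi header:

	#define PCI_EXP_LNKSTA_SLC	0x1000	/* Slot Clock Configuration */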
config_gen3_gen4_eq_presets(pcie);
init_host_aspm(pcie);
@@ -897,9 +922,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
disable_aspm_l12(pcie);
}
- val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
if (pcie->update_fc_fixup) {
val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
@@ -912,14 +939,14 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static int tegra194_pcie_start_link(struct dw_pcie *pci)
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
u32 val, offset, speed, tmp;
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
bool retry = true;
- if (pcie->mode == DW_PCIE_EP_TYPE) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
enable_irq(pcie->pex_rst_irq);
return 0;
}
@@ -978,9 +1005,9 @@ retry_link:
offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
val &= ~PCI_DLF_EXCHANGE_ENABLE;
- dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
- tegra194_pcie_host_init(pp);
+ tegra_pcie_dw_host_init(pp);
dw_pcie_setup_rc(pp);
retry = false;
@@ -996,32 +1023,32 @@ retry_link:
return 0;
}
-static int tegra194_pcie_link_up(struct dw_pcie *pci)
+static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra194_pcie_stop_link(struct dw_pcie *pci)
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
disable_irq(pcie->pex_rst_irq);
}
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
- .link_up = tegra194_pcie_link_up,
- .start_link = tegra194_pcie_start_link,
- .stop_link = tegra194_pcie_stop_link,
+ .link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
};
-static const struct dw_pcie_host_ops tegra194_pcie_host_ops = {
- .host_init = tegra194_pcie_host_init,
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+ .host_init = tegra_pcie_dw_host_init,
};
-static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
unsigned int phy_count = pcie->phy_count;
@@ -1031,7 +1058,7 @@ static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
}
}
-static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie)
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
unsigned int i;
int ret;
@@ -1058,7 +1085,7 @@ phy_exit:
return ret;
}
-static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
@@ -1111,13 +1138,27 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
pcie->update_fc_fixup = true;
+ /* RP using an external REFCLK is supported only in Tegra234 */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ pcie->enable_ext_refclk = true;
+ } else {
+ pcie->enable_ext_refclk =
+ of_property_read_bool(pcie->dev->of_node,
+ "nvidia,enable-ext-refclk");
+ }
+
pcie->supports_clkreq =
of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
pcie->enable_cdm_check =
of_property_read_bool(np, "snps,enable-cdm-check");
- if (pcie->mode == DW_PCIE_RC_TYPE)
+ if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
+ pcie->enable_srns =
+ of_property_read_bool(np, "nvidia,enable-srns");
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
return 0;
/* Endpoint mode specific DT entries */
@@ -1154,15 +1195,18 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
return 0;
}
-static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
bool enable)
{
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
- /* Controller-5 doesn't need to have its state set by BPMP-FW */
- if (pcie->cid == 5)
+ /*
+ * Controller-5 doesn't need to have its state set by BPMP-FW in
+ * Tegra194
+ */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
return 0;
memset(&req, 0, sizeof(req));
@@ -1182,7 +1226,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
bool enable)
{
struct mrq_uphy_response resp;
@@ -1210,9 +1254,9 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
struct pci_bus *child, *root_bus = NULL;
struct pci_dev *pdev;
@@ -1248,7 +1292,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
}
}
-static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
if (IS_ERR(pcie->slot_ctl_3v3)) {
@@ -1269,7 +1313,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
return 0;
}
-static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie)
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
int ret;
@@ -1307,7 +1351,7 @@ fail_12v_enable:
return ret;
}
-static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
if (pcie->slot_ctl_12v)
regulator_disable(pcie->slot_ctl_12v);
@@ -1315,7 +1359,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
regulator_disable(pcie->slot_ctl_3v3);
}
-static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
bool en_hw_hot_rst)
{
int ret;
@@ -1328,6 +1372,14 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
return ret;
}
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
+ goto fail_pll_init;
+ }
+ }
+
ret = tegra_pcie_enable_slot_regulators(pcie);
if (ret < 0)
goto fail_slot_reg_en;
@@ -1351,11 +1403,13 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
goto fail_core_apb_rst;
}
- if (en_hw_hot_rst) {
+ if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
/* Enable HW_HOT_RST mode */
val = appl_readl(pcie, APPL_CTRL);
val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
val |= APPL_CTRL_HW_HOT_RST_EN;
appl_writel(pcie, val, APPL_CTRL);
}
@@ -1382,6 +1436,19 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
appl_writel(pcie, val, APPL_CFG_MISC);
+ if (pcie->enable_srns || pcie->enable_ext_refclk) {
+ /*
+	 * When the Tegra PCIe RP is using an external clock, it cannot
+	 * supply the same clock to its downstream hierarchy. Hence, gate
+	 * the PCIe RP REFCLK out pads when the RP & EP are using separate
+	 * clocks or the RP is using an external REFCLK.
+ */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
if (!pcie->supports_clkreq) {
val = appl_readl(pcie, APPL_PINMUX);
val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
@@ -1407,12 +1474,15 @@ fail_core_clk:
fail_reg_en:
tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
+ if (pcie->enable_ext_refclk)
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
tegra_pcie_bpmp_set_ctrl_state(pcie, false);
return ret;
}
-static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
int ret;
@@ -1434,23 +1504,29 @@ static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
tegra_pcie_disable_slot_regulators(pcie);
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
+ }
+
ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
pcie->cid, ret);
}
-static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = tegra_pcie_config_controller(pcie, false);
if (ret < 0)
return ret;
- pp->ops = &tegra194_pcie_host_ops;
+ pp->ops = &tegra_pcie_dw_host_ops;
ret = dw_pcie_host_init(pp);
if (ret < 0) {
@@ -1465,11 +1541,11 @@ fail_host_init:
return ret;
}
-static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
u32 val;
- if (!tegra194_pcie_link_up(&pcie->pci))
+ if (!tegra_pcie_dw_link_up(&pcie->pci))
return 0;
val = appl_readl(pcie, APPL_RADM_STATUS);
@@ -1481,12 +1557,12 @@ static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
1, PME_ACK_TIMEOUT);
}
-static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
u32 data;
int err;
- if (!tegra194_pcie_link_up(&pcie->pci)) {
+ if (!tegra_pcie_dw_link_up(&pcie->pci)) {
dev_dbg(pcie->dev, "PCIe link is not up...!\n");
return;
}
@@ -1543,15 +1619,15 @@ static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie)
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
- tegra194_pcie_pme_turnoff(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
}
-static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
char *name;
@@ -1578,7 +1654,7 @@ static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
goto fail_pm_get_sync;
}
- pcie->link_state = tegra194_pcie_link_up(&pcie->pci);
+ pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
if (!pcie->link_state) {
ret = -ENOMEDIUM;
goto fail_host_init;
@@ -1603,7 +1679,7 @@ fail_pm_get_sync:
return ret;
}
-static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
u32 val;
int ret;
@@ -1634,6 +1710,13 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
pm_runtime_put_sync(pcie->dev);
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
+ ret);
+ }
+
ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
if (ret)
dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
@@ -1642,13 +1725,14 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
-static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct dw_pcie_ep *ep = &pci->ep;
struct device *dev = pcie->dev;
u32 val;
int ret;
+ u16 val_16;
if (pcie->ep_state == EP_STATE_ENABLED)
return;
@@ -1660,10 +1744,20 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
return;
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
if (ret) {
- dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
- goto fail_pll_init;
+ dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
+ pcie->cid, ret);
+ goto fail_set_ctrl_state;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
+ ret);
+ goto fail_pll_init;
+ }
}
ret = clk_prepare_enable(pcie->core_clk);
@@ -1760,12 +1854,29 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
disable_aspm_l12(pcie);
}
- val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
+
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+ val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
@@ -1782,6 +1893,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
dw_pcie_ep_init_notify(ep);
+ /* Program the private control to allow sending LTR upstream */
+ if (pcie->of_data->has_ltr_req_fix) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+ }
+
/* Enable LTSSM */
val = appl_readl(pcie, APPL_CTRL);
val |= APPL_CTRL_LTSSM_EN;
@@ -1802,12 +1920,14 @@ fail_core_apb_rst:
fail_core_clk_enable:
tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+fail_set_ctrl_state:
pm_runtime_put_sync(dev);
}
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
if (gpiod_get_value(pcie->pex_rst_gpiod))
pex_ep_event_pex_rst_assert(pcie);
@@ -1817,7 +1937,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1829,7 +1949,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
if (unlikely(irq > 31))
return -EINVAL;
@@ -1839,7 +1959,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
struct dw_pcie_ep *ep = &pcie->pci.ep;
@@ -1853,7 +1973,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
@@ -1894,7 +2014,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.get_features = tegra_pcie_ep_get_features,
};
-static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
@@ -1949,19 +2069,20 @@ static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
if (ret) {
dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
ret);
+ pm_runtime_disable(dev);
return ret;
}
return 0;
}
-static int tegra194_pcie_probe(struct platform_device *pdev)
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
- const struct tegra194_pcie_of_data *data;
+ const struct tegra_pcie_dw_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
- struct tegra194_pcie *pcie;
- struct pcie_port *pp;
+ struct tegra_pcie_dw *pcie;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct phy **phys;
char *name;
@@ -1977,16 +2098,14 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
pci = &pcie->pci;
pci->dev = &pdev->dev;
pci->ops = &tegra_dw_pcie_ops;
- pci->n_fts[0] = N_FTS_VAL;
- pci->n_fts[1] = FTS_VAL;
- pci->version = 0x490A;
-
+ pcie->dev = &pdev->dev;
+ pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
+ pci->n_fts[0] = pcie->of_data->n_fts[0];
+ pci->n_fts[1] = pcie->of_data->n_fts[1];
pp = &pci->pp;
pp->num_vectors = MAX_MSI_IRQS;
- pcie->dev = &pdev->dev;
- pcie->mode = (enum dw_pcie_device_mode)data->mode;
- ret = tegra194_pcie_parse_dt(pcie);
+ ret = tegra_pcie_dw_parse_dt(pcie);
if (ret < 0) {
const char *level = KERN_ERR;
@@ -2101,7 +2220,7 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- switch (pcie->mode) {
+ switch (pcie->of_data->mode) {
case DW_PCIE_RC_TYPE:
ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
IRQF_SHARED, "tegra-pcie-intr", pcie);
@@ -2136,7 +2255,8 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
break;
default:
- dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
+ dev_err(dev, "Invalid PCIe device type %d\n",
+ pcie->of_data->mode);
}
fail:
@@ -2144,16 +2264,22 @@ fail:
return ret;
}
-static int tegra194_pcie_remove(struct platform_device *pdev)
+static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
- struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
- if (!pcie->link_state)
- return 0;
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return 0;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_deinit_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
- debugfs_remove_recursive(pcie->debugfs);
- tegra_pcie_deinit_controller(pcie);
- pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
tegra_bpmp_put(pcie->bpmp);
if (pcie->pex_refclk_sel_gpiod)
@@ -2162,41 +2288,48 @@ static int tegra194_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int tegra194_pcie_suspend_late(struct device *dev)
+static int tegra_pcie_dw_suspend_late(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
+ return -EPERM;
+ }
+
if (!pcie->link_state)
return 0;
/* Enable HW_HOT_RST mode */
- val = appl_readl(pcie, APPL_CTRL);
- val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
- val |= APPL_CTRL_HW_HOT_RST_EN;
- appl_writel(pcie, val, APPL_CTRL);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
return 0;
}
-static int tegra194_pcie_suspend_noirq(struct device *dev)
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
if (!pcie->link_state)
return 0;
tegra_pcie_downstream_dev_to_D0(pcie);
- tegra194_pcie_pme_turnoff(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
return 0;
}
-static int tegra194_pcie_resume_noirq(struct device *dev)
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
int ret;
if (!pcie->link_state)
@@ -2206,7 +2339,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
if (ret < 0)
return ret;
- ret = tegra194_pcie_host_init(&pcie->pci.pp);
+ ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
if (ret < 0) {
dev_err(dev, "Failed to init host: %d\n", ret);
goto fail_host_init;
@@ -2214,7 +2347,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
dw_pcie_setup_rc(&pcie->pci.pp);
- ret = tegra194_pcie_start_link(&pcie->pci);
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
if (ret < 0)
goto fail_host_init;
@@ -2225,12 +2358,12 @@ fail_host_init:
return ret;
}
-static int tegra194_pcie_resume_early(struct device *dev)
+static int tegra_pcie_dw_resume_early(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
- if (pcie->mode == DW_PCIE_EP_TYPE) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
dev_err(dev, "Suspend is not supported in EP mode");
return -ENOTSUPP;
}
@@ -2239,75 +2372,124 @@ static int tegra194_pcie_resume_early(struct device *dev)
return 0;
/* Disable HW_HOT_RST mode */
- val = appl_readl(pcie, APPL_CTRL);
- val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
- val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
- val &= ~APPL_CTRL_HW_HOT_RST_EN;
- appl_writel(pcie, val, APPL_CTRL);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+ val &= ~APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
return 0;
}
-static void tegra194_pcie_shutdown(struct platform_device *pdev)
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
- struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
- if (!pcie->link_state)
- return;
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
- debugfs_remove_recursive(pcie->debugfs);
- tegra_pcie_downstream_dev_to_D0(pcie);
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_downstream_dev_to_D0(pcie);
- disable_irq(pcie->pci.pp.irq);
- if (IS_ENABLED(CONFIG_PCI_MSI))
- disable_irq(pcie->pci.pp.msi_irq);
+ disable_irq(pcie->pci.pp.irq);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ disable_irq(pcie->pci.pp.msi_irq[0]);
- tegra194_pcie_pme_turnoff(pcie);
- tegra_pcie_unconfig_controller(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
}
-static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = {
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
.mode = DW_PCIE_RC_TYPE,
+ .has_msix_doorbell_access_fix = true,
+ .has_sbr_reset_fix = true,
+ .has_l1ss_exit_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
};
-static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = {
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
.mode = DW_PCIE_EP_TYPE,
+ .has_l1ss_exit_fix = true,
+ .has_ltr_req_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
};
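
The preset comments above decode directly from the vectors: gen4_preset_vec is shifted into GEN3_EQ_CONTROL_OFF's PSET_REQ_VEC field (see the config_gen3_gen4_eq_presets() hunk), where bit n requests transmitter preset n during Gen4 equalization. Checking the arithmetic:

	0x360 = BIT(9) | BIT(8) | BIT(6) | BIT(5)	/* presets 5, 6, 8, 9 (Tegra194) */
	0x340 = BIT(9) | BIT(8) | BIT(6)		/* presets 6, 8, 9 (Tegra234) */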
-static const struct of_device_id tegra194_pcie_of_match[] = {
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
- .data = &tegra194_pcie_rc_of_data,
+ .data = &tegra194_pcie_dw_rc_of_data,
},
{
.compatible = "nvidia,tegra194-pcie-ep",
- .data = &tegra194_pcie_ep_of_data,
+ .data = &tegra194_pcie_dw_ep_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie",
+ .data = &tegra234_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie-ep",
+ .data = &tegra234_pcie_dw_ep_of_data,
},
- {},
+ {}
};
-static const struct dev_pm_ops tegra194_pcie_pm_ops = {
- .suspend_late = tegra194_pcie_suspend_late,
- .suspend_noirq = tegra194_pcie_suspend_noirq,
- .resume_noirq = tegra194_pcie_resume_noirq,
- .resume_early = tegra194_pcie_resume_early,
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+ .suspend_late = tegra_pcie_dw_suspend_late,
+ .suspend_noirq = tegra_pcie_dw_suspend_noirq,
+ .resume_noirq = tegra_pcie_dw_resume_noirq,
+ .resume_early = tegra_pcie_dw_resume_early,
};
-static struct platform_driver tegra194_pcie_driver = {
- .probe = tegra194_pcie_probe,
- .remove = tegra194_pcie_remove,
- .shutdown = tegra194_pcie_shutdown,
+static struct platform_driver tegra_pcie_dw_driver = {
+ .probe = tegra_pcie_dw_probe,
+ .remove = tegra_pcie_dw_remove,
+ .shutdown = tegra_pcie_dw_shutdown,
.driver = {
.name = "tegra194-pcie",
- .pm = &tegra194_pcie_pm_ops,
- .of_match_table = tegra194_pcie_of_match,
+ .pm = &tegra_pcie_dw_pm_ops,
+ .of_match_table = tegra_pcie_dw_of_match,
},
};
-module_platform_driver(tegra194_pcie_driver);
+module_platform_driver(tegra_pcie_dw_driver);
-MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match);
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index b45ac3754242..48c3eba817b4 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -171,7 +171,7 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
@@ -188,7 +188,7 @@ static void uniphier_pcie_irq_mask(struct irq_data *d)
static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
@@ -225,7 +225,7 @@ static const struct irq_domain_ops uniphier_intx_domain_ops = {
static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
- struct pcie_port *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -258,7 +258,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -295,7 +295,7 @@ out_put_node:
return ret;
}
-static int uniphier_pcie_host_init(struct pcie_port *pp)
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 50f80f07e4db..71026fefa366 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -178,7 +178,7 @@ static void visconti_pcie_stop_link(struct dw_pcie *pci)
*/
static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
return cpu_addr & ~pp->io_base;
}
@@ -190,7 +190,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = visconti_pcie_stop_link,
};
-static int visconti_pcie_host_init(struct pcie_port *pp)
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -278,7 +278,7 @@ static int visconti_add_pcie_port(struct visconti_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
pp->irq = platform_get_irq_byname(pdev, "intr");
if (pp->irq < 0)
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index ffec82c8a523..966c8b48bd96 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -8,6 +8,7 @@
* Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_PCIEXP_CAP 0xc0
+#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
@@ -857,14 +859,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
switch (reg) {
- case PCI_EXP_SLTCTL:
- *value = PCI_EXP_SLTSTA_PDS << 16;
- return PCI_BRIDGE_EMUL_HANDLED;
-
/*
- * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
- * to be handled here, because their values are stored in emulated
- * config space buffer, and we read them from there when needed.
+ * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
+ * also supported, but do not need to be handled here, because their
+ * values are stored in emulated config space buffer, and we read them
+ * from there when needed.
*/
case PCI_EXP_LNKCAP: {
@@ -944,11 +943,89 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case 0:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+
+ /*
+ * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada
+ * 3700 Functional Specification does not document registers
+ * at those addresses.
+ *
+ * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error
+ * Reporting Capability header the last Extended Capability.
+ * If we obtain documentation for those registers in the
+ * future, this can be changed.
+ */
+ *value &= 0x000fffff;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_STATUS:
+ case PCI_ERR_ROOT_ERR_SRC:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
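
The 0x000fffff mask is doing header surgery: a PCIe Extended Capability header packs the capability ID in bits [15:0], the version in [19:16], and the next-capability offset in [31:20], so the mask preserves ID and version while zeroing the next pointer. In terms of the uapi helpers (illustrative only; the driver open-codes the mask):

	hdr &= 0x000fffff;
	/* PCI_EXT_CAP_ID(hdr) and PCI_EXT_CAP_VER(hdr) are unchanged;
	 * PCI_EXT_CAP_NEXT(hdr) is now 0, ending the capability list. */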
+static void
+advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ /* These are W1C registers, so clear other bits */
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_ROOT_STATUS:
+ new &= mask;
+ fallthrough;
+
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_ERR_SRC:
+ advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
+ break;
+
+ default:
+ break;
+ }
+}
+
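
The new &= mask step above is what keeps write-1-to-clear semantics honest: a W1C status register clears exactly the bits written as 1, so forwarding bits outside the caller's byte-enable mask could silently ack unrelated errors. Illustrative use against this driver's registers (a sketch, reusing accessors from the hunk):

	/* Clear only the Replay Number Rollover correctable error;
	 * all other latched PCI_ERR_COR_STATUS bits stay set. */
	advk_writel(pcie, PCI_ERR_COR_REP_ROLL,
		    PCIE_CORE_PCIERR_CAP + PCI_ERR_COR_STATUS);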
static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
.read_base = advk_pci_bridge_emul_base_conf_read,
.write_base = advk_pci_bridge_emul_base_conf_write,
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+ .read_ext = advk_pci_bridge_emul_ext_conf_read,
+ .write_ext = advk_pci_bridge_emul_ext_conf_write,
};
/*
@@ -977,8 +1054,25 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCI_INTERRUPT_INTA;
- /* Aardvark HW provides PCIe Capability structure in version 2 */
- bridge->pcie_conf.cap = cpu_to_le16(2);
+ /*
+	 * Aardvark HW provides the PCIe Capability structure in version 2
+	 * and indicates slot support, which is emulated.
+ */
+ bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);
+
+ /*
+ * Set Presence Detect State bit permanently since there is no support
+ * for unplugging the card nor detecting whether it is plugged. (If a
+ * platform exists in the future that supports it, via a GPIO for
+ * example, it should be implemented via this bit.)
+ *
+	 * Set physical slot number to 1 since there is only one port, and
+	 * the zero value is reserved for ports within the same silicon as
+	 * the Root Port, which is not our case.
+ */
+ bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
+ 1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 50a8e1d6f70a..05c50408f13b 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -9,6 +9,8 @@
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include "../pci.h"
@@ -18,18 +20,31 @@
#define DEV_PCIE_PORT_2 0x7a29
#define DEV_LS2K_APB 0x7a02
-#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_GMAC 0x7a03
+#define DEV_LS7A_DC1 0x7a06
#define DEV_LS7A_LPC 0x7a0c
+#define DEV_LS7A_AHCI 0x7a08
+#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_GNET 0x7a13
+#define DEV_LS7A_EHCI 0x7a14
+#define DEV_LS7A_DC2 0x7a36
+#define DEV_LS7A_HDMI 0x7a37
#define FLAG_CFG0 BIT(0)
#define FLAG_CFG1 BIT(1)
#define FLAG_DEV_FIX BIT(2)
+#define FLAG_DEV_HIDDEN BIT(3)
+
+struct loongson_pci_data {
+ u32 flags;
+ struct pci_ops *ops;
+};
struct loongson_pci {
void __iomem *cfg0_base;
void __iomem *cfg1_base;
struct platform_device *pdev;
- u32 flags;
+ const struct loongson_pci_data *data;
};
/* Fixup wrong class code in PCIe bridges */
@@ -92,55 +107,106 @@ static void loongson_mrrs_quirk(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
-static void __iomem *cfg1_map(struct loongson_pci *priv, int bus,
- unsigned int devfn, int where)
+static void loongson_pci_pin_quirk(struct pci_dev *pdev)
{
- unsigned long addroff = 0x0;
+ pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC1, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC2, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GMAC, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_AHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_EHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GNET, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+
+static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
- if (bus != 0)
- addroff |= BIT(28); /* Type 1 Access */
- addroff |= (where & 0xff) | ((where & 0xf00) << 16);
- addroff |= (bus << 16) | (devfn << 8);
- return priv->cfg1_base + addroff;
+ if (acpi_disabled)
+ return (struct loongson_pci *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct loongson_pci *)(cfg->priv);
}
-static void __iomem *cfg0_map(struct loongson_pci *priv, int bus,
- unsigned int devfn, int where)
+static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
{
unsigned long addroff = 0x0;
+ unsigned char busnum = bus->number;
- if (bus != 0)
+ if (!pci_is_root_bus(bus)) {
addroff |= BIT(24); /* Type 1 Access */
- addroff |= (bus << 16) | (devfn << 8) | where;
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | where;
return priv->cfg0_base + addroff;
}
-static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ unsigned long addroff = 0x0;
unsigned char busnum = bus->number;
- struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
- struct loongson_pci *priv = pci_host_bridge_priv(bridge);
+
+ if (!pci_is_root_bus(bus)) {
+ addroff |= BIT(28); /* Type 1 Access */
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16);
+ return priv->cfg1_base + addroff;
+}
+
+static bool pdev_may_exist(struct pci_bus *bus, unsigned int device,
+ unsigned int function)
+{
+ return !(pci_is_root_bus(bus) &&
+ (device >= 9 && device <= 20) && (function > 0));
+}
+
+static void __iomem *pci_loongson_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ unsigned int device = PCI_SLOT(devfn);
+ unsigned int function = PCI_FUNC(devfn);
+ struct loongson_pci *priv = pci_bus_to_loongson_pci(bus);
/*
* Do not read more than one device on the bus other than
- * the host bus. For our hardware the root bus is always bus 0.
+ * the host bus.
*/
- if (priv->flags & FLAG_DEV_FIX && busnum != 0 &&
- PCI_SLOT(devfn) > 0)
- return NULL;
+ if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) {
+ if (!pci_is_root_bus(bus) && (device > 0))
+ return NULL;
+ }
+
+ /* Don't access non-existent devices */
+ if (priv->data->flags & FLAG_DEV_HIDDEN) {
+ if (!pdev_may_exist(bus, device, function))
+ return NULL;
+ }
/* CFG0 can only access standard space */
if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base)
- return cfg0_map(priv, busnum, devfn, where);
+ return cfg0_map(priv, bus, devfn, where);
/* CFG1 can access extended space */
if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base)
- return cfg1_map(priv, busnum, devfn, where);
+ return cfg1_map(priv, bus, devfn, where);
return NULL;
}
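
To make the CFG1 encoding concrete, a worked example (illustration only) for bus 1, devfn 0x08 and extended config offset where = 0x100:

	addroff  = BIT(28);               /* Type 1, not root bus: 0x10000000 */
	addroff |= 1 << 16;               /* bus number:           0x00010000 */
	addroff |= 0x08 << 8;             /* devfn:                0x00000800 */
	addroff |= 0x100 & 0xff;          /* low offset bits:      0x00000000 */
	addroff |= (0x100 & 0xf00) << 16; /* high offset bits:     0x01000000 */
	/* final MMIO address: cfg1_base + 0x11010800 */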
+#ifdef CONFIG_OF
+
static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
@@ -159,20 +225,42 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return val;
}
-/* H/w only accept 32-bit PCI operations */
+/* LS2K/LS7A accept 8/16/32-bit PCI config operations */
static struct pci_ops loongson_pci_ops = {
.map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/* RS780/SR5690 only accept 32-bit PCI config operations */
+static struct pci_ops loongson_pci_ops32 = {
+ .map_bus = pci_loongson_map_bus,
.read = pci_generic_config_read32,
.write = pci_generic_config_write32,
};
+static const struct loongson_pci_data ls2k_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data ls7a_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data rs780e_pci_data = {
+ .flags = FLAG_CFG0,
+ .ops = &loongson_pci_ops32,
+};
+
static const struct of_device_id loongson_pci_of_match[] = {
{ .compatible = "loongson,ls2k-pci",
- .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ .data = &ls2k_pci_data, },
{ .compatible = "loongson,ls7a-pci",
- .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ .data = &ls7a_pci_data, },
{ .compatible = "loongson,rs780e-pci",
- .data = (void *)(FLAG_CFG0), },
+ .data = &rs780e_pci_data, },
{}
};
@@ -193,20 +281,20 @@ static int loongson_pci_probe(struct platform_device *pdev)
priv = pci_host_bridge_priv(bridge);
priv->pdev = pdev;
- priv->flags = (unsigned long)of_device_get_match_data(dev);
+ priv->data = of_device_get_match_data(dev);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(dev, "missing mem resources for cfg0\n");
- return -EINVAL;
+ if (priv->data->flags & FLAG_CFG0) {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ dev_err(dev, "missing mem resources for cfg0\n");
+ else {
+ priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(priv->cfg0_base))
+ return PTR_ERR(priv->cfg0_base);
+ }
}
- priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
- if (IS_ERR(priv->cfg0_base))
- return PTR_ERR(priv->cfg0_base);
-
- /* CFG1 is optional */
- if (priv->flags & FLAG_CFG1) {
+ if (priv->data->flags & FLAG_CFG1) {
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!regs)
dev_info(dev, "missing mem resource for cfg1\n");
@@ -218,7 +306,7 @@ static int loongson_pci_probe(struct platform_device *pdev)
}
bridge->sysdata = priv;
- bridge->ops = &loongson_pci_ops;
+ bridge->ops = priv->data->ops;
bridge->map_irq = loongson_map_irq;
return pci_host_probe(bridge);
@@ -232,3 +320,41 @@ static struct platform_driver loongson_pci_driver = {
.probe = loongson_pci_probe,
};
builtin_platform_driver(loongson_pci_driver);
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static int loongson_pci_ecam_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct loongson_pci *priv;
+ struct loongson_pci_data *data;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ cfg->priv = priv;
+ data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN;
+ priv->data = data;
+ priv->cfg1_base = cfg->win - (cfg->busr.start << 16);
+
+ return 0;
+}
+
+const struct pci_ecam_ops loongson_pci_ecam_ops = {
+ .bus_shift = 16,
+ .init = loongson_pci_ecam_init,
+ .pci_ops = {
+ .map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif
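
The bias applied to cfg1_base above deserves a note: with bus_shift == 16, the ECAM window cfg->win maps bus cfg->busr.start at offset 0, while cfg1_map() always adds (busnum << 16); subtracting (busr.start << 16) makes the two encodings agree. A sketch of the resulting address computation for standard config offsets (where < 0x100), assuming the generic struct pci_config_window fields:

	#include <linux/pci-ecam.h>

	static void __iomem *ecam_cfg_addr(struct pci_config_window *cfg,
					   unsigned int busnum,
					   unsigned int devfn, int where)
	{
		/* bias the window so bus busr.start lands at offset 0 */
		void __iomem *base = cfg->win - (cfg->busr.start << 16);

		return base + (busnum << 16) + (devfn << 8) + where;
	}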
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index c1ffdb06c971..af915c951f06 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1216,7 +1216,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -ENOENT;
}
-#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
struct mvebu_pcie *pcie;
@@ -1249,7 +1248,6 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
-#endif
static void mvebu_pcie_port_clk_put(void *data)
{
@@ -1737,7 +1735,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
};
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
static struct platform_driver mvebu_pcie_driver = {
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index 35804ea394fd..839695791757 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -328,6 +328,7 @@ static const struct of_device_id rcar_pci_of_match[] = {
{ .compatible = "renesas,pci-r8a7791", },
{ .compatible = "renesas,pci-r8a7794", },
{ .compatible = "renesas,pci-rcar-gen2", },
+ { .compatible = "renesas,pci-rzn1", },
{ },
};
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 0457ec02ab70..8e323e93be91 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2707,7 +2707,7 @@ static int tegra_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
+static int tegra_pcie_pm_suspend(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
struct tegra_pcie_port *port;
@@ -2742,7 +2742,7 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
+static int tegra_pcie_pm_resume(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -2798,9 +2798,8 @@ poweroff:
}
static const struct dev_pm_ops tegra_pcie_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
- tegra_pcie_pm_resume)
+ RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
};
static struct platform_driver tegra_pcie_driver = {
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index eb6240958bb0..549d3bd6d1c2 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -641,7 +641,7 @@ static const struct of_device_id xgene_pcie_match_table[] = {
static struct platform_driver xgene_pcie_driver = {
.driver = {
.name = "xgene-pcie",
- .of_match_table = of_match_ptr(xgene_pcie_match_table),
+ .of_match_table = xgene_pcie_match_table,
.suppress_bind_attrs = true,
},
.probe = xgene_pcie_probe,
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e61058e13818..521acd632f1a 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -190,11 +191,6 @@
/* Forward declarations */
struct brcm_pcie;
-static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
enum {
RGR1_SW_INIT_1,
@@ -223,64 +219,9 @@ struct pcie_cfg_data {
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
-static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
-};
-
-static const struct pcie_cfg_data generic_cfg = {
- .offsets = pcie_offsets,
- .type = GENERIC,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm4908_cfg = {
- .offsets = pcie_offsets,
- .type = BCM4908,
- .perst_set = brcm_pcie_perst_set_4908,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
- .perst_set = brcm_pcie_perst_set_7278,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
-};
-
-static const struct pcie_cfg_data bcm2711_cfg = {
- .offsets = pcie_offsets,
- .type = BCM2711,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+struct subdev_regulators {
+ unsigned int num_supplies;
+ struct regulator_bulk_data supplies[];
};
struct brcm_msi {
@@ -320,6 +261,8 @@ struct brcm_pcie {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
@@ -741,52 +684,48 @@ static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
return dla && plu;
}
-static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + where;
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + where;
+ return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
}
-static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3);
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
- idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3));
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
writel(idx, base + IDX_ADDR(pcie));
return base + DATA_ADDR(pcie);
}
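
For reference, the generic ECAM offset used by both map_bus variants packs the address as bus[27:20] | devfn[19:12] | reg[11:0]; assuming the <linux/pci-ecam.h> helpers, it expands to:

	/*
	 * PCIE_ECAM_OFFSET(bus, devfn, where) ==
	 *	((bus & 0xff) << 20) | ((devfn & 0xff) << 12) | (where & 0xfff)
	 */

The 7425 variant writes this full offset to the index register and then accesses a fixed data register, which is why it stays with the 32-bit generic accessors.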
-static struct pci_ops brcm_pcie_ops = {
- .map_bus = brcm_pcie_map_conf,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
-};
-
-static struct pci_ops brcm_pcie_ops32 = {
- .map_bus = brcm_pcie_map_conf32,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
-};
-
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
@@ -926,17 +865,13 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- struct device *dev = pcie->dev;
+ struct pci_host_bridge *bridge;
struct resource_entry *entry;
- bool ssc_good = false;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- int i, ret, memc;
u32 tmp, burst, aspm_support;
+ int num_out_wins = 0;
+ int ret, memc;
/* Reset the bridge */
pcie->bridge_sw_init_set(pcie, 1);
@@ -1012,6 +947,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
+
/* disable the PCIe->GISB memory window (RC_BAR1) */
tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
@@ -1022,31 +962,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
-
- /* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
/*
- * Give the RC/EP time to wake up, before trying to configure RC.
- * Intermittently check status for link-up, up to a total of 100ms.
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
*/
- for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
- msleep(5);
-
- if (!brcm_pcie_link_up(pcie)) {
- dev_err(dev, "link down\n");
- return -ENODEV;
- }
-
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(dev, "PCIe misconfigured; is in EP mode\n");
- return -EINVAL;
- }
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ bridge = pci_host_bridge_from_priv(pcie);
resource_list_for_each_entry(entry, &bridge->windows) {
- res = entry->res;
+ struct resource *res = entry->res;
if (resource_type(res) != IORESOURCE_MEM)
continue;
@@ -1075,23 +1011,41 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* PCIe->SCB endian mode for BAR */
+ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
+ writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+
+ return 0;
+}
+
+static int brcm_pcie_start_link(struct brcm_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ void __iomem *base = pcie->base;
+ u16 nlw, cls, lnksta;
+ bool ssc_good = false;
+ u32 tmp;
+ int ret, i;
+
+ /* Unassert the fundamental reset */
+ pcie->perst_set(pcie, 0);
/*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ * Give the RC/EP time to wake up, before trying to configure RC.
+ * Intermittently check status for link-up, up to a total of 100ms.
*/
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
+ msleep(5);
+
+ if (!brcm_pcie_link_up(pcie)) {
+ dev_err(dev, "link down\n");
+ return -ENODEV;
+ }
+
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
@@ -1108,12 +1062,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
pci_speed_string(pcie_link_speed[cls]), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
- /* PCIe->SCB endian mode for BAR */
- tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
- PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
- writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
-
/*
* Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
* is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
@@ -1125,6 +1073,82 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
return 0;
}
+static const char * const supplies[] = {
+ "vpcie3v3",
+ "vpcie3v3aux",
+ "vpcie12v",
+};
+
+static void *alloc_subdev_regulators(struct device *dev)
+{
+ const size_t size = sizeof(struct subdev_regulators) +
+ sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
+ struct subdev_regulators *sr;
+ int i;
+
+ sr = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (sr) {
+ sr->num_supplies = ARRAY_SIZE(supplies);
+ for (i = 0; i < ARRAY_SIZE(supplies); i++)
+ sr->supplies[i].supply = supplies[i];
+ }
+
+ return sr;
+}
+
+static int brcm_pcie_add_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr;
+ int ret;
+
+ if (!bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ if (dev->of_node) {
+ sr = alloc_subdev_regulators(dev);
+ if (!sr) {
+ dev_info(dev, "Can't allocate regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ pcie->sr = sr;
+
+ ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_info(dev, "No regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_err(dev, "Can't enable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+ }
+ }
+
+no_regulators:
+ brcm_pcie_start_link(pcie);
+ return 0;
+}
+
+static void brcm_pcie_remove_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct subdev_regulators *sr = pcie->sr;
+ struct device *dev = &bus->dev;
+
+ if (!sr)
+ return;
+
+ if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+ dev_err(dev, "Failed to disable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+}
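
The add_bus/remove_bus pair above follows the usual regulator_bulk_*() lifecycle: get, enable, and on teardown disable then free. A minimal sketch with a hypothetical two-supply consumer (supply names illustrative only):

	#include <linux/regulator/consumer.h>

	static struct regulator_bulk_data demo_supplies[] = {
		{ .supply = "vpcie3v3" },
		{ .supply = "vpcie12v" },
	};

	static int demo_power_on(struct device *dev)
	{
		int ret;

		ret = regulator_bulk_get(dev, ARRAY_SIZE(demo_supplies),
					 demo_supplies);
		if (ret)
			return ret;

		ret = regulator_bulk_enable(ARRAY_SIZE(demo_supplies),
					    demo_supplies);
		if (ret)
			regulator_bulk_free(ARRAY_SIZE(demo_supplies),
					    demo_supplies);
		return ret;
	}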
+
/* L23 is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
@@ -1221,9 +1245,21 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 1);
}
-static int brcm_pcie_suspend(struct device *dev)
+static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
+{
+ bool *ret = data;
+
+ if (device_may_wakeup(&dev->dev)) {
+ *ret = true;
+ dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
+ }
+ return (int) *ret;
+}
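
Note that pci_walk_bus() stops iterating as soon as the callback returns non-zero, so the walk in brcm_pcie_suspend_noirq() below short-circuits on the first wake-up capable device it finds. A minimal usage sketch (function name hypothetical):

	static bool demo_any_wakeup(struct pci_host_bridge *bridge)
	{
		bool wakeup = false;

		pci_walk_bus(bridge->bus, pci_dev_may_wakeup, &wakeup);
		return wakeup;
	}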
+
+static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
@@ -1241,12 +1277,31 @@ static int brcm_pcie_suspend(struct device *dev)
return ret;
}
+ if (pcie->sr) {
+ /*
+ * Now turn off the regulators, but if at least one
+ * downstream device is enabled as a wake-up source, do not
+ * turn off regulators.
+ */
+ pcie->ep_wakeup_capable = false;
+ pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
+ &pcie->ep_wakeup_capable);
+ if (!pcie->ep_wakeup_capable) {
+ ret = regulator_bulk_disable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn off regulators\n");
+ reset_control_reset(pcie->rescal);
+ return ret;
+ }
+ }
+ }
clk_disable_unprepare(pcie->clk);
return 0;
}
-static int brcm_pcie_resume(struct device *dev)
+static int brcm_pcie_resume_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
@@ -1281,11 +1336,37 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
goto err_reset;
+ if (pcie->sr) {
+ if (pcie->ep_wakeup_capable) {
+ /*
+ * We are resuming from a suspend. In the suspend we
+ * did not disable the power supplies, so there is
+ * no need to enable them (and falsely increase their
+ * usage count).
+ */
+ pcie->ep_wakeup_capable = false;
+ } else {
+ ret = regulator_bulk_enable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn on regulators\n");
+ goto err_reset;
+ }
+ }
+ }
+
+ ret = brcm_pcie_start_link(pcie);
+ if (ret)
+ goto err_regulator;
+
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
return 0;
+err_regulator:
+ if (pcie->sr)
+ regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
reset_control_rearm(pcie->rescal);
err_disable_clk:
@@ -1316,6 +1397,66 @@ static int brcm_pcie_remove(struct platform_device *pdev)
return 0;
}
+static const int pcie_offsets[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const int pcie_offsets_bmips_7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .type = GENERIC,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bmips_7425,
+ .type = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM7435,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm4908_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM4908,
+ .perst_set = brcm_pcie_perst_set_4908,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const int pcie_offset_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data bcm7278_cfg = {
+ .offsets = pcie_offset_bcm7278,
+ .type = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+};
+
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM2711,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
@@ -1328,6 +1469,22 @@ static const struct of_device_id brcm_pcie_match[] = {
{},
};
+static struct pci_ops brcm_pcie_ops = {
+ .map_bus = brcm_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
+static struct pci_ops brcm7425_pcie_ops = {
+ .map_bus = brcm7425_pcie_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
@@ -1414,12 +1571,22 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops;
+ bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
- return pci_host_probe(bridge);
+ ret = pci_host_probe(bridge);
+ if (!ret && !brcm_pcie_link_up(pcie))
+ ret = -ENODEV;
+
+ if (ret) {
+ brcm_pcie_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
fail:
__brcm_pcie_remove(pcie);
return ret;
@@ -1428,8 +1595,8 @@ fail:
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend = brcm_pcie_suspend,
- .resume = brcm_pcie_resume,
+ .suspend_noirq = brcm_pcie_suspend_noirq,
+ .resume_noirq = brcm_pcie_resume_noirq,
};
static struct platform_driver brcm_pcie_driver = {
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 757b7fbcdc59..fee036b07cd4 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -589,8 +589,8 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
msi->has_inten_reg = true;
msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
- msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
- sizeof(*msi->bitmap), GFP_KERNEL);
+ msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
+ GFP_KERNEL);
if (!msi->bitmap)
return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 5d9fd36b02d1..11cdb9b6f109 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -153,6 +153,37 @@ struct mtk_gen3_pcie {
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
+/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
+static const char *const ltssm_str[] = {
+ "detect.quiet", /* 0x00 */
+ "detect.active", /* 0x01 */
+ "polling.active", /* 0x02 */
+ "polling.compliance", /* 0x03 */
+ "polling.configuration", /* 0x04 */
+ "config.linkwidthstart", /* 0x05 */
+ "config.linkwidthaccept", /* 0x06 */
+ "config.lanenumwait", /* 0x07 */
+ "config.lanenumaccept", /* 0x08 */
+ "config.complete", /* 0x09 */
+ "config.idle", /* 0x0A */
+ "recovery.receiverlock", /* 0x0B */
+ "recovery.equalization", /* 0x0C */
+ "recovery.speed", /* 0x0D */
+ "recovery.receiverconfig", /* 0x0E */
+ "recovery.idle", /* 0x0F */
+ "L0", /* 0x10 */
+ "L0s", /* 0x11 */
+ "L1.entry", /* 0x12 */
+ "L1.idle", /* 0x13 */
+ "L2.idle", /* 0x14 */
+ "L2.transmitwake", /* 0x15 */
+ "disable", /* 0x16 */
+ "loopback.entry", /* 0x17 */
+ "loopback.active", /* 0x18 */
+ "loopback.exit", /* 0x19 */
+ "hotreset", /* 0x1A */
+};
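
A sketch of the decode applied below, assuming PCIE_LTSSM_STATE() extracts bits [28:24] of the status register as the comment above states:

	unsigned int index = (val >> 24) & 0x1f;	/* PCIE_LTSSM_STATE(val) */
	const char *state = index < ARRAY_SIZE(ltssm_str) ?
				ltssm_str[index] : "Unknown state";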
+
/**
* mtk_pcie_config_tlp_header() - Configure a configuration TLP header
* @bus: PCI bus to query
@@ -327,8 +358,16 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
!!(val & PCIE_PORT_LINKUP), 20,
PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
if (err) {
+ const char *ltssm_state;
+ int ltssm_index;
+
val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
- dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
+ ltssm_index = PCIE_LTSSM_STATE(val);
+ ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
+ "Unknown state" : ltssm_str[ltssm_index];
+ dev_err(pcie->dev,
+ "PCIe link down, current LTSSM state: %s (%#x)\n",
+ ltssm_state, val);
return err;
}
@@ -600,7 +639,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
&intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
/* Setup MSI */
@@ -623,13 +663,15 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
goto err_msi_domain;
}
+ of_node_put(intc_node);
return 0;
err_msi_domain:
irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
irq_domain_remove(pcie->intx_domain);
-
+out_put_node:
+ of_node_put(intc_node);
return ret;
}
@@ -917,7 +959,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)
return 0;
}
-static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
+static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
int i;
@@ -935,7 +977,7 @@ static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
raw_spin_unlock(&pcie->irq_lock);
}
-static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
+static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
int i;
@@ -953,7 +995,7 @@ static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
raw_spin_unlock(&pcie->irq_lock);
}
-static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
+static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
u32 val;
@@ -968,7 +1010,7 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
50 * USEC_PER_MSEC);
}
-static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+static int mtk_pcie_suspend_noirq(struct device *dev)
{
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -994,7 +1036,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+static int mtk_pcie_resume_noirq(struct device *dev)
{
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -1015,8 +1057,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops mtk_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
- mtk_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
};
static const struct of_device_id mtk_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index be8bd919cb88..ae5ad05ddc1d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1150,7 +1150,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+static int mtk_pcie_suspend_noirq(struct device *dev)
{
struct mtk_pcie *pcie = dev_get_drvdata(dev);
struct mtk_pcie_port *port;
@@ -1174,7 +1174,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+static int mtk_pcie_resume_noirq(struct device *dev)
{
struct mtk_pcie *pcie = dev_get_drvdata(dev);
struct mtk_pcie_port *port, *tmp;
@@ -1195,8 +1195,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops mtk_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
- mtk_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
};
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index dd5dba419047..7263d175b5ad 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -904,6 +904,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)
&event_domain_ops, port);
if (!port->event_domain) {
dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
return -ENOMEM;
}
@@ -913,6 +914,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)
&intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
return -ENOMEM;
}
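
A general note on the of_node_put() fixes above: device-tree lookups such as of_get_child_by_name() take a reference on the returned node, and that reference must be dropped on every exit path, error or not. A minimal sketch of the pattern (node name hypothetical):

	struct device_node *node;

	node = of_get_child_by_name(parent, "interrupt-controller");
	if (!node)
		return -ENODEV;
	/* ... create IRQ domains that use node ... */
	of_node_put(node);	/* drop the lookup reference on all paths */
	return 0;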
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 997c4df6a1e7..e4faf90feaf5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -1072,7 +1072,7 @@ err_pm_put:
return err;
}
-static int __maybe_unused rcar_pcie_resume(struct device *dev)
+static int rcar_pcie_resume(struct device *dev)
{
struct rcar_pcie_host *host = dev_get_drvdata(dev);
struct rcar_pcie *pcie = &host->pcie;
@@ -1127,7 +1127,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops rcar_pcie_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
+ SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
.resume_noirq = rcar_pcie_resume_noirq,
};
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 7f56f99b4116..7352b5ff8d35 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -864,7 +864,7 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
return 0;
}
-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+static int rockchip_pcie_suspend_noirq(struct device *dev)
{
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int ret;
@@ -889,7 +889,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
return ret;
}
-static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+static int rockchip_pcie_resume_noirq(struct device *dev)
{
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int err;
@@ -1035,8 +1035,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
- rockchip_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+ rockchip_pcie_resume_noirq)
};
static const struct of_device_id rockchip_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index c7cd44ed4dfc..e4ab48041eb6 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -35,6 +35,10 @@
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+
/* Interrupt registers definitions */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
@@ -98,6 +102,19 @@
/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
+enum xilinx_cpm_version {
+ CPM,
+ CPM5,
+};
+
+/**
+ * struct xilinx_cpm_variant - CPM variant information
+ * @version: CPM version
+ */
+struct xilinx_cpm_variant {
+ enum xilinx_cpm_version version;
+};
+
/**
* struct xilinx_cpm_pcie - PCIe port information
* @dev: Device pointer
@@ -109,6 +126,7 @@
* @intx_irq: legacy interrupt number
* @irq: Error interrupt number
* @lock: lock protecting shared register access
+ * @variant: CPM version check pointer
*/
struct xilinx_cpm_pcie {
struct device *dev;
@@ -120,6 +138,7 @@ struct xilinx_cpm_pcie {
int intx_irq;
int irq;
raw_spinlock_t lock;
+ const struct xilinx_cpm_variant *variant;
};
static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
@@ -285,6 +304,13 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
+ if (port->variant->version == CPM5) {
+ val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (val)
+ writel_relaxed(val, port->cpm_base +
+ XILINX_CPM_PCIE_IR_STATUS);
+ }
+
/*
* XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
* CPM SLCR block.
@@ -484,6 +510,12 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
*/
writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
+
+ if (port->variant->version == CPM5) {
+ writel(XILINX_CPM_PCIE_IR_LOCAL,
+ port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ }
+
/* Enable the Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
@@ -518,7 +550,14 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- port->reg_base = port->cfg->win;
+ if (port->variant->version == CPM5) {
+ port->reg_base = devm_platform_ioremap_resource_byname(pdev,
+ "cpm_csr");
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ } else {
+ port->reg_base = port->cfg->win;
+ }
return 0;
}
@@ -559,6 +598,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
if (!bus)
return -ENODEV;
+ port->variant = of_device_get_match_data(dev);
+
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -591,8 +632,23 @@ err_parse_dt:
return err;
}
+static const struct xilinx_cpm_variant cpm_host = {
+ .version = CPM,
+};
+
+static const struct xilinx_cpm_variant cpm5_host = {
+ .version = CPM5,
+};
+
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
- { .compatible = "xlnx,versal-cpm-host-1.00", },
+ {
+ .compatible = "xlnx,versal-cpm-host-1.00",
+ .data = &cpm_host,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5-host",
+ .data = &cpm5_host,
+ },
{}
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 94a14a3d7e55..e06e9f4fc50f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -898,7 +898,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (vmd->instance < 0)
return vmd->instance;
- vmd->name = kasprintf(GFP_KERNEL, "vmd%d", vmd->instance);
+ vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d",
+ vmd->instance);
if (!vmd->name) {
err = -ENOMEM;
goto out_release_instance;
@@ -936,7 +937,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_release_instance:
ida_simple_remove(&vmd_instance_ida, vmd->instance);
- kfree(vmd->name);
return err;
}
@@ -959,7 +959,6 @@ static void vmd_remove(struct pci_dev *dev)
vmd_detach_resources(vmd);
vmd_remove_irq_domain(vmd);
ida_simple_remove(&vmd_instance_ida, vmd->instance);
- kfree(vmd->name);
}
#ifdef CONFIG_PM_SLEEP
@@ -1013,6 +1012,14 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
new file mode 100644
index 000000000000..e402f05068a5
--- /dev/null
+++ b/drivers/pci/doe.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#define dev_fmt(fmt) "DOE: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/workqueue.h>
+
+#define PCI_DOE_PROTOCOL_DISCOVERY 0
+
+/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
+#define PCI_DOE_TIMEOUT HZ
+#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
+
+#define PCI_DOE_FLAG_CANCEL 0
+#define PCI_DOE_FLAG_DEAD 1
+
+/**
+ * struct pci_doe_mb - State for a single DOE mailbox
+ *
+ * This state is used to manage a single DOE mailbox capability. All fields
+ * should be considered opaque to the consumers and the structure passed into
+ * the helpers below after being created by pcim_doe_create_mb().
+ *
+ * @pdev: PCI device this mailbox belongs to
+ * @cap_offset: Capability offset
+ * @prots: Array of protocols supported (encoded as long values)
+ * @wq: Wait queue for work item
+ * @work_queue: Queue of pci_doe_work items
+ * @flags: Bit array of PCI_DOE_FLAG_* flags
+ */
+struct pci_doe_mb {
+ struct pci_dev *pdev;
+ u16 cap_offset;
+ struct xarray prots;
+
+ wait_queue_head_t wq;
+ struct workqueue_struct *work_queue;
+ unsigned long flags;
+};
+
+static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
+{
+ if (wait_event_timeout(doe_mb->wq,
+ test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
+ timeout))
+ return -EIO;
+ return 0;
+}
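
Worth spelling out: wait_event_timeout() returns non-zero when its condition became true before the timeout, i.e. when PCI_DOE_FLAG_CANCEL was set. So -EIO here means the mailbox is being torn down, while 0 simply means one poll interval elapsed without cancellation.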
+
+static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
+}
+
+static int pci_doe_abort(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+
+ pci_dbg(pdev, "[%x] Issuing Abort\n", offset);
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);
+
+ do {
+ int rc;
+ u32 val;
+
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc)
+ return rc;
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+
+ /* Abort success! */
+ if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
+ !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return 0;
+
+ } while (!time_after(jiffies, timeout_jiffies));
+
+ /* Abort has timed out and the MB is dead */
+ pci_err(pdev, "[%x] ABORT timed out\n", offset);
+ return -EIO;
+}
+
+static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+ int i;
+
+ /*
+ * Check that the DOE busy bit is not set. If it is set, this could
+ * indicate someone other than Linux (e.g. firmware) is using the
+ * mailbox. Note that firmware and the OS are expected to negotiate
+ * access rights via a method yet to be defined.
+ */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return -EBUSY;
+
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ /* Write DOE Header */
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
+ /* Length is 2 DW of header + length of payload in DW */
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
+ 2 + task->request_pl_sz /
+ sizeof(u32)));
+ for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ task->request_pl[i]);
+
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
+
+ return 0;
+}
+
+static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
+ return true;
+ return false;
+}
+
+static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ size_t length, payload_length;
+ u32 val;
+ int i;
+
+ /* Read the first dword to get the protocol */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
+ return -EIO;
+ }
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ /* Read the second dword to get the length */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+
+ length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
+ if (length > SZ_1M || length < 2)
+ return -EIO;
+
+ /* First 2 dwords have already been read */
+ length -= 2;
+ payload_length = min(length, task->response_pl_sz / sizeof(u32));
+ /* Read the rest of the response payload */
+ for (i = 0; i < payload_length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ,
+ &task->response_pl[i]);
+ /* Prior to the last ack, ensure Data Object Ready */
+ if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
+ return -EIO;
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Flush excess length */
+ for (; i < length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Final error check to pick up on any since Data Object Ready */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
+}
+
+static void signal_task_complete(struct pci_doe_task *task, int rv)
+{
+ task->rv = rv;
+ task->complete(task);
+}
+
+static void signal_task_abort(struct pci_doe_task *task, int rv)
+{
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+
+ if (pci_doe_abort(doe_mb)) {
+ /*
+ * If the device can't process an abort, mark the mailbox
+ * dead: no more submissions will be accepted.
+ */
+ pci_err(pdev, "[%x] Abort failed marking mailbox dead\n",
+ doe_mb->cap_offset);
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+ }
+ signal_task_complete(task, rv);
+}
+
+static void doe_statemachine_work(struct work_struct *work)
+{
+ struct pci_doe_task *task = container_of(work, struct pci_doe_task,
+ work);
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+ u32 val;
+ int rc;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
+ signal_task_complete(task, -EIO);
+ return;
+ }
+
+ /* Send request */
+ rc = pci_doe_send_req(doe_mb, task);
+ if (rc) {
+ /*
+ * The specification does not provide any guidance on how to
+ * resolve conflicting requests from other entities.
+ * Furthermore, it is likely that busy will not be detected
+ * most of the time. Flag any detection of status busy with an
+ * error.
+ */
+ if (rc == -EBUSY)
+ dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
+ offset);
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ /* Poll for response */
+retry_resp:
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+
+ if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
+ if (time_after(jiffies, timeout_jiffies)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc) {
+ signal_task_abort(task, rc);
+ return;
+ }
+ goto retry_resp;
+ }
+
+ rc = pci_doe_recv_resp(doe_mb, task);
+ if (rc < 0) {
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ signal_task_complete(task, rc);
+}
+
+static void pci_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ u8 *protocol)
+{
+ u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
+ *index);
+ u32 response_pl;
+ DECLARE_COMPLETION_ONSTACK(c);
+ struct pci_doe_task task = {
+ .prot.vid = PCI_VENDOR_ID_PCI_SIG,
+ .prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
+ .request_pl = &request_pl,
+ .request_pl_sz = sizeof(request_pl),
+ .response_pl = &response_pl,
+ .response_pl_sz = sizeof(response_pl),
+ .complete = pci_doe_task_complete,
+ .private = &c,
+ };
+ int rc;
+
+ rc = pci_doe_submit_task(doe_mb, &task);
+ if (rc < 0)
+ return rc;
+
+ wait_for_completion(&c);
+
+ if (task.rv != sizeof(response_pl))
+ return -EIO;
+
+ *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
+ *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ response_pl);
+ *index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
+ response_pl);
+
+ return 0;
+}
+
+static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+{
+ return xa_mk_value((vid << 8) | prot);
+}
+
+static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+{
+ u8 index = 0;
+ u8 xa_idx = 0;
+
+ do {
+ int rc;
+ u16 vid;
+ u8 prot;
+
+ rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ if (rc)
+ return rc;
+
+ pci_dbg(doe_mb->pdev,
+ "[%x] Found protocol %d vid: %x prot: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, prot);
+
+ rc = xa_insert(&doe_mb->prots, xa_idx++,
+ pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ if (rc)
+ return rc;
+ } while (index);
+
+ return 0;
+}
+
+static void pci_doe_xa_destroy(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ xa_destroy(&doe_mb->prots);
+}
+
+static void pci_doe_destroy_workqueue(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ destroy_workqueue(doe_mb->work_queue);
+}
+
+static void pci_doe_flush_mb(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ /* Stop all pending work items from starting */
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+
+ /* Cancel an in progress work item, if necessary */
+ set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
+ wake_up(&doe_mb->wq);
+
+ /* Flush all work items */
+ flush_workqueue(doe_mb->work_queue);
+}
+
+/**
+ * pcim_doe_create_mb() - Create a DOE mailbox object
+ *
+ * @pdev: PCI device to create the DOE mailbox for
+ * @cap_offset: Offset of the DOE mailbox
+ *
+ * Create a single mailbox object to manage the mailbox protocol at the
+ * cap_offset specified.
+ *
+ * RETURNS: created mailbox object on success
+ * ERR_PTR(-errno) on failure
+ */
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
+{
+ struct pci_doe_mb *doe_mb;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL);
+ if (!doe_mb)
+ return ERR_PTR(-ENOMEM);
+
+ doe_mb->pdev = pdev;
+ doe_mb->cap_offset = cap_offset;
+ init_waitqueue_head(&doe_mb->wq);
+
+ xa_init(&doe_mb->prots);
+ rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
+ dev_driver_string(&pdev->dev),
+ pci_name(pdev),
+ doe_mb->cap_offset);
+ if (!doe_mb->work_queue) {
+ pci_err(pdev, "[%x] failed to allocate work queue\n",
+ doe_mb->cap_offset);
+ return ERR_PTR(-ENOMEM);
+ }
+ rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* Reset the mailbox by issuing an abort */
+ rc = pci_doe_abort(doe_mb);
+ if (rc) {
+ pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * The state machine and the mailbox should be in sync now; set up
+ * the mailbox flush action prior to using the mailbox to query
+ * protocols.
+ */
+ rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = pci_doe_cache_protocols(doe_mb);
+ if (rc) {
+ pci_err(pdev, "[%x] failed to cache protocols : %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ return doe_mb;
+}
+EXPORT_SYMBOL_GPL(pcim_doe_create_mb);
+
+/**
+ * pci_doe_supports_prot() - Return whether the DOE instance supports the given
+ * protocol
+ * @doe_mb: DOE mailbox capability to query
+ * @vid: Protocol Vendor ID
+ * @type: Protocol type
+ *
+ * RETURNS: True if the DOE mailbox supports the protocol specified
+ */
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+{
+ unsigned long index;
+ void *entry;
+
+ /* The discovery protocol must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ return true;
+
+ xa_for_each(&doe_mb->prots, index, entry)
+ if (entry == pci_doe_xa_prot_entry(vid, type))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
+
+/**
+ * pci_doe_submit_task() - Submit a task to be processed by the state machine
+ *
+ * @doe_mb: DOE mailbox capability to submit to
+ * @task: task to be queued
+ *
+ * Submit a DOE task (request/response) to the DOE mailbox to be processed.
+ * Returns upon queueing the task object. If the queue is full this function
+ * will sleep until there is room in the queue.
+ *
+ * task->complete will be called when the state machine is done processing this
+ * task.
+ *
+ * Excess data will be discarded.
+ *
+ * RETURNS: 0 when task has been successfully queued, -ERRNO on error
+ */
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ return -EINVAL;
+
+ /*
+ * DOE requests must be a whole number of DW and the response needs to
+ * be big enough for at least 1 DW
+ */
+ if (task->request_pl_sz % sizeof(u32) ||
+ task->response_pl_sz < sizeof(u32))
+ return -EINVAL;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
+ return -EIO;
+
+ task->doe_mb = doe_mb;
+ INIT_WORK(&task->work, doe_statemachine_work);
+ queue_work(doe_mb->work_queue, &task->work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_doe_submit_task);
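Since task submission is asynchronous, a typical consumer layers a synchronous helper on top of it. A minimal sketch, assuming only the API added above (doe_task_complete() and doe_exchange_sync() are hypothetical names):

/* Hypothetical sketch: one synchronous DOE request/response exchange. */
static void doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

static int doe_exchange_sync(struct pci_doe_mb *doe_mb, u16 vid, u8 type,
			     u32 *req, size_t req_sz, u32 *rsp, size_t rsp_sz)
{
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = vid,
		.prot.type = type,
		.request_pl = req,
		.request_pl_sz = req_sz,
		.response_pl = rsp,
		.response_pl_sz = rsp_sz,
		.complete = doe_task_complete,
		.private = &c,
	};
	int rc;

	rc = pci_doe_submit_task(doe_mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&c);
	return task.rv;	/* response length in bytes, or negative errno */
}
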
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 5f1242ca2f4e..295a033ee9a2 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -25,3 +25,15 @@ config PCI_EPF_NTB
device tree.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_VNTB
+	tristate "PCI Endpoint Virtual NTB driver"
+ depends on PCI_ENDPOINT
+ depends on NTB
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+	  Bridge (NTB) driver for the PCIe Endpoint. The NTB is implemented
+	  between a PCI Root Port and a PCIe Endpoint.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 96ab932a537a..5c13001deaba 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 5b833f00e980..36b1801a061b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -52,9 +52,11 @@ struct pci_epf_test {
enum pci_barno test_reg_bar;
size_t msix_table_offset;
struct delayed_work cmd_handler;
- struct dma_chan *dma_chan;
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
struct completion transfer_complete;
bool dma_supported;
+ bool dma_private;
const struct pci_epc_features *epc_features;
};
@@ -96,6 +98,8 @@ static void pci_epf_test_dma_callback(void *param)
* @dma_src: The source address of the data transfer. It can be a physical
* address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
* @len: The size of the data transfer
+ * @dma_remote: remote RC physical address
+ * @dir: DMA transfer direction
*
* Function that uses dmaengine API to transfer data between PCIe EP and remote
* PCIe RC. The source and destination address can be a physical address given
@@ -105,12 +109,16 @@ static void pci_epf_test_dma_callback(void *param)
*/
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len)
+ size_t len, dma_addr_t dma_remote,
+ enum dma_transfer_direction dir)
{
+ struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
+ epf_test->dma_chan_tx : epf_test->dma_chan_rx;
+ dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
- struct dma_chan *chan = epf_test->dma_chan;
struct pci_epf *epf = epf_test->epf;
struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config sconf = {};
struct device *dev = &epf->dev;
dma_cookie_t cookie;
int ret;
@@ -120,7 +128,24 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
return -EINVAL;
}
- tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+ if (epf_test->dma_private) {
+ sconf.direction = dir;
+ if (dir == DMA_MEM_TO_DEV)
+ sconf.dst_addr = dma_remote;
+ else
+ sconf.src_addr = dma_remote;
+
+ if (dmaengine_slave_config(chan, &sconf)) {
+			dev_err(dev, "DMA slave config failed\n");
+ return -EIO;
+ }
+ tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
+ flags);
+ } else {
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+ flags);
+ }
+
if (!tx) {
dev_err(dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -148,6 +173,23 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
return 0;
}
+struct epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+ struct epf_dma_filter *filter = node;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev
+ && (filter->dma_mask & caps.directions);
+}
+
/**
* pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
* @epf_test: the EPF test device that performs data transfer operation
@@ -158,10 +200,44 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
+ struct epf_dma_filter filter;
struct dma_chan *dma_chan;
dma_cap_mask_t mask;
int ret;
+ filter.dev = epf->epc->dev.parent;
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
+ goto fail_back_tx;
+ }
+
+ epf_test->dma_chan_rx = dma_chan;
+
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
+ goto fail_back_rx;
+ }
+
+ epf_test->dma_chan_tx = dma_chan;
+ epf_test->dma_private = true;
+
+ init_completion(&epf_test->transfer_complete);
+
+ return 0;
+
+fail_back_rx:
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_tx = NULL;
+
+fail_back_tx:
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -174,7 +250,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
}
init_completion(&epf_test->transfer_complete);
- epf_test->dma_chan = dma_chan;
+ epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
return 0;
}
@@ -190,8 +266,17 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported)
return;
- dma_release_channel(epf_test->dma_chan);
- epf_test->dma_chan = NULL;
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
+
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+
+ return;
}
static void pci_epf_test_print_rate(const char *ops, u64 size,
@@ -280,8 +365,15 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_map_addr;
}
+ if (epf_test->dma_private) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- src_phys_addr, reg->size);
+ src_phys_addr, reg->size, 0,
+ DMA_MEM_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
} else {
@@ -373,7 +465,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
ktime_get_ts64(&start);
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- phys_addr, reg->size);
+ phys_addr, reg->size,
+ reg->src_addr, DMA_DEV_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
@@ -463,8 +556,11 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
}
ktime_get_ts64(&start);
+
ret = pci_epf_test_data_transfer(epf_test, phys_addr,
- src_phys_addr, reg->size);
+ src_phys_addr, reg->size,
+ reg->dst_addr,
+ DMA_MEM_TO_DEV);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
@@ -627,7 +723,6 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
- pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
new file mode 100644
index 000000000000..0ea85e1d292e
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -0,0 +1,1442 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ * Between PCI RC and EP
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Copyright (C) 2022 NXP
+ *
+ * Based on pci-epf-ntb.c
+ * Author: Frank Li <Frank.Li@nxp.com>
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/*
+ * +------------+ +---------------------------------------+
+ * | | | |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | NetDev | | | NetDev |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | Transfer | | | Transfer |
+ * +------------+ | +--------------+
+ * | | | | |
+ * | PCI NTB | | | |
+ * | EPF | | | |
+ * | Driver | | | PCI Virtual |
+ * | | +---------------+ | NTB Driver |
+ * | | | PCI EP NTB |<------>| |
+ * | | | FN Driver | | |
+ * +------------+ +---------------+ +--------------+
+ * | | | | | |
+ * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI |
+ * | | PCI | | | Bus |
+ * +------------+ +---------------+--------+--------------+
+ * PCIe Root Port PCI EP
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/ntb.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_DB,
+ BAR_MW0,
+ BAR_MW1,
+ BAR_MW2,
+};
+
+/*
+ * +--------------------------------------------------+ Base
+ * | |
+ * | |
+ * | |
+ * | Common Control Register |
+ * | |
+ * | |
+ * | |
+ * +-----------------------+--------------------------+ Base+spad_offset
+ * |                       |                          |
+ * |  Peer Spad Space      |    Spad Space            |
+ * |                       |                          |
+ * |                       |                          |
+ * +-----------------------+--------------------------+ Base+spad_offset
+ * |                       |                          |  +spad_count * 4
+ * |                       |                          |
+ * |  Spad Space           |    Peer Spad Space       |
+ * |                       |                          |
+ * +-----------------------+--------------------------+
+ * Virtual PCI PCIe Endpoint
+ * NTB Driver NTB Driver
+ */
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 reserved;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
+
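To make the handshake concrete: the host writes the argument first, then a non-zero command, and polls command_status until the endpoint's command handler (which runs every 5 ms, see epf_ntb_cmd_handler() below) reports a result. A host-side sketch, not part of this patch (vntb_host_send_command() is a hypothetical name; it assumes <linux/io.h> and <linux/delay.h>):

/* Hypothetical host-side sketch: issue one command via the control region. */
static int vntb_host_send_command(struct epf_ntb_ctrl __iomem *ctrl,
				  u32 cmd, u32 arg)
{
	int timeout = 100;

	writew(0, &ctrl->command_status);	/* clear stale status */
	writel(arg, &ctrl->argument);
	writel(cmd, &ctrl->command);		/* non-zero value wakes the poller */

	while (timeout--) {
		u16 status = readw(&ctrl->command_status);

		if (status == COMMAND_STATUS_OK)
			return 0;
		if (status == COMMAND_STATUS_ERROR)
			return -EIO;
		usleep_range(5000, 6000);	/* endpoint polls every 5 ms */
	}

	return -ETIMEDOUT;
}
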
+struct epf_ntb {
+ struct ntb_dev ntb;
+ struct pci_epf *epf;
+ struct config_group group;
+
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ u64 mws_size[MAX_MW];
+ u64 db;
+ u32 vbus_number;
+ u16 vntb_pid;
+ u16 vntb_vid;
+
+ bool linkup;
+ u32 spad_size;
+
+ enum pci_barno epf_ntb_bar[6];
+
+ struct epf_ntb_ctrl *reg;
+
+ phys_addr_t epf_db_phy;
+ void __iomem *epf_db;
+
+ phys_addr_t vpci_mw_phy[MAX_MW];
+ void __iomem *vpci_mw_addr[MAX_MW];
+
+ struct delayed_work cmd_handler;
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB function in the HOST invokes ntb_link_enable(),
+ * this NTB function driver triggers a link event to the vHOST.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ if (link_up)
+ ntb->reg->link_status |= LINK_STATUS_UP;
+ else
+ ntb->reg->link_status &= ~LINK_STATUS_UP;
+
+ ntb_link_event(&ntb->ntb);
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost
+ * to access the memory window of host
+ * @ntb: NTB device that facilitates communication between host and vhost
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * EP Outbound Window
+ * +--------+ +-----------+
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | +-----------+
+ * | Virtual| | Memory Win|
+ * | NTB | -----------> | |
+ * | Driver | | |
+ * | | +-----------+
+ * | | | |
+ * | | | |
+ * +--------+ +-----------+
+ * VHost PCI EP
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
+{
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ u64 addr, size;
+ int ret = 0;
+
+ phys_addr = ntb->vpci_mw_phy[mw];
+ addr = ntb->reg->addr;
+ size = ntb->reg->size;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&ntb->epf->epc->dev,
+ "Failed to map memory window %d address\n", mw);
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Tear down the OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr().
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
+{
+ pci_epc_unmap_addr(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ ntb->vpci_mw_phy[mw]);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct of the epf_ntb device
+ *
+ * Workqueue function that gets invoked periodically (once every 5ms) to
+ * check whether the endpoint has received any commands from the NTB host.
+ * The host can send commands to configure a doorbell, configure a memory
+ * window, or update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ int ret;
+ int i;
+
+ ntb = container_of(work, struct epf_ntb, cmd_handler.work);
+
+ for (i = 1; i < ntb->db_count; i++) {
+ if (readl(ntb->epf_db + i * 4)) {
+			ntb->db |= 1 << (i - 1);
+
+ ntb_db_event(&ntb->ntb, i);
+ writel(0, ntb->epf_db + i * 4);
+ }
+ }
+
+ ctrl = ntb->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+	dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb->linkup = true;
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ case COMMAND_LINK_DOWN:
+ ntb->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "UNKNOWN command: %d\n", command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
+ msecs_to_jiffies(5));
+}
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Clear the BAR that carries the host's config and self scratchpad region
+ * (removes inbound ATU configuration). While BAR0 is the default self
+ * scratchpad BAR, an NTB could have other BARs for self scratchpad
+ * (because of reserved BARs). This function gets the exact BAR used for
+ * self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined
+ * into a single region and mapped using the same BAR. Also note the
+ * vHOST's peer scratchpad is the HOST's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Map the BAR that carries the host's config and self scratchpad region.
+ *
+ * Please note the self scratchpad region and config region are combined
+ * into a single region and mapped using the same BAR.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+		dev_err(dev, "Config/Status/SPAD BAR set failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+{
+ size_t align;
+ enum pci_barno barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size;
+ struct pci_epf *epf = ntb->epf;
+ struct device *dev = &epf->dev;
+ u32 spad_count;
+ void *base;
+ int i;
+ const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
+ epf->func_no,
+ epf->vfunc_no);
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+	if (!IS_ALIGNED(size, align))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = 2 * spad_count * 4;
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ if (!base) {
+ dev_err(dev, "Config/Status/SPAD alloc region fail\n");
+ return -ENOMEM;
+ }
+
+ ntb->reg = base;
+
+ ctrl = ntb->reg;
+ ctrl->spad_offset = ctrl_size;
+
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ntb->spad_size = spad_size;
+
+ ctrl->db_entry_size = 4;
+
+ for (i = 0; i < ntb->db_count; i++) {
+ ntb->reg->db_data[i] = 1 + i;
+ ntb->reg->db_offset[i] = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Configure MSI/MSI-X capability for each interface with number of
+ * interrupts equal to "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ struct device *dev;
+ u32 db_count;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ if (!(epc_features->msix_capable || epc_features->msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+
+ if (epc_features->msi_capable) {
+ ret = pci_epc_set_msi(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ 16);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_db_bar_init() - Configure Doorbell window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ u32 align;
+ struct device *dev = &ntb->epf->dev;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t size = 4 * ntb->db_count;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no);
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ if (!mw_addr) {
+		dev_err(dev, "Failed to allocate doorbell BAR memory\n");
+ return -ENOMEM;
+ }
+
+ ntb->epf_db = mw_addr;
+
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Doorbell BAR set failed\n");
+ goto err_alloc_peer_mem;
+ }
+ return ret;
+
+err_alloc_peer_mem:
+	pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
+	return ret;
+}
+
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
+
+/**
+ * epf_ntb_db_bar_clear() - Clear doorbell BAR and free the memory
+ * allocated for the doorbell region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+ pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+}
+
+/**
+ * epf_ntb_mw_bar_init() - Configure Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ */
+static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
+{
+ int ret = 0;
+ int i;
+ u64 size;
+ enum pci_barno barno;
+ struct device *dev = &ntb->epf->dev;
+
+ for (i = 0; i < ntb->num_mws; i++) {
+ size = ntb->mws_size[i];
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+
+ ntb->epf->bar[barno].barno = barno;
+ ntb->epf->bar[barno].size = size;
+ ntb->epf->bar[barno].addr = NULL;
+ ntb->epf->bar[barno].phys_addr = 0;
+ ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+ if (ret) {
+ dev_err(dev, "MW set failed\n");
+ goto err_alloc_mem;
+ }
+
+ /* Allocate EPC outbound memory windows to vpci vntb device */
+ ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
+ &ntb->vpci_mw_phy[i],
+ size);
+ if (!ntb->vpci_mw_addr[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "Failed to allocate source address\n");
+ goto err_set_bar;
+ }
+ }
+
+ return ret;
+
+err_set_bar:
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+err_alloc_mem:
+ epf_ntb_mw_bar_clear(ntb, i);
+ return ret;
+}
+
+/**
+ * epf_ntb_mw_bar_clear() - Clear Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ * @num_mws: Number of memory windows to be cleared
+ */
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+{
+ enum pci_barno barno;
+ int i;
+
+ for (i = 0; i < num_mws; i++) {
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+
+ pci_epc_mem_free_addr(ntb->epf->epc,
+ ntb->vpci_mw_phy[i],
+ ntb->vpci_mw_addr[i],
+ ntb->mws_size[i]);
+ }
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Unregister the NTB function from the endpoint controller and release it.
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
+ pci_epc_put(ntb->epf->epc);
+}
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memorywindow)
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ /* These are required BARs which are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ dev_err(dev, "Fail to get NTB function BAR\n");
+ return barno;
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+			dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+			break;
+		}
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_init() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * to check for commands from host. This function will write to the
+ * EP controller HW for configuring it.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = epf->epc;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb);
+ if (ret) {
+		dev_err(dev, "Config/self SPAD BAR init failed\n");
+ return ret;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb);
+ if (ret) {
+ dev_err(dev, "Interrupt configuration failed\n");
+ goto err_config_interrupt;
+ }
+
+ ret = epf_ntb_db_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "DB BAR init failed\n");
+ goto err_db_bar_init;
+ }
+
+ ret = epf_ntb_mw_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "MW BAR init failed\n");
+ goto err_mw_bar_init;
+ }
+
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ goto err_write_header;
+ }
+ }
+
+ INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+err_mw_bar_init:
+ epf_ntb_db_bar_clear(ntb);
+err_db_bar_init:
+err_config_interrupt:
+ epf_ntb_config_sspad_bar_clear(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to cleanup all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_R(vbus_number)
+EPF_NTB_W(vbus_number)
+EPF_NTB_R(vntb_pid)
+EPF_NTB_W(vntb_pid)
+EPF_NTB_R(vntb_vid)
+EPF_NTB_W(vntb_vid)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+CONFIGFS_ATTR(epf_ntb_, vbus_number);
+CONFIGFS_ATTR(epf_ntb_, vntb_pid);
+CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ &epf_ntb_attr_vbus_number,
+ &epf_ntb_attr_vntb_pid,
+ &epf_ntb_attr_vntb_vid,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB specific properties like db_count, spad_count, num_mws, etc.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
+
+/*==== Virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/
+
+static u32 pci_space[] = {
+ 0xffffffff, /*DeviceID, Vendor ID*/
+ 0, /*Status, Command*/
+ 0xffffffff, /*Class code, subclass, prog if, revision id*/
+ 0x40, /*bist, header type, latency Timer, cache line size*/
+ 0, /*BAR 0*/
+ 0, /*BAR 1*/
+ 0, /*BAR 2*/
+ 0, /*BAR 3*/
+ 0, /*BAR 4*/
+ 0, /*BAR 5*/
+	0, /*CardBus CIS pointer*/
+	0, /*Subsystem ID, Subsystem vendor ID*/
+	0, /*ROM Base Address*/
+	0, /*Reserved, Capabilities pointer*/
+ 0, /*Reserved,*/
+ 0, /*Max Lat, Min Gnt, interrupt pin, interrupt line*/
+};
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+ if (devfn == 0) {
+ memcpy(val, ((u8 *)pci_space) + where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+ return 0;
+}
+
+static struct pci_ops vpci_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+static int vpci_scan_bus(void *sysdata)
+{
+ struct pci_bus *vpci_bus;
+ struct epf_ntb *ndev = sysdata;
+
+ vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+	if (!vpci_bus) {
+		pr_err("create pci bus failed\n");
+		return -EINVAL;
+	}
+
+ pci_bus_add_devices(vpci_bus);
+
+ return 0;
+}
+
+/*==================== Virtual PCIe NTB driver ==========================*/
+
+static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct epf_ntb *ndev = ntb_ndev(ntb);
+
+ return ndev->num_mws;
+}
+
+static int vntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->num_mws;
+}
+
+static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
+}
+
+static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ int ret;
+ struct device *dev;
+
+ dev = &ntb->ntb.dev;
+ barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ epf_bar = &ntb->epf->bar[barno];
+ epf_bar->phys_addr = addr;
+ epf_bar->barno = barno;
+ epf_bar->size = size;
+
+ ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
+ if (ret) {
+		dev_err(dev, "failed to set MW translation\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ return 0;
+}
+
+static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (base)
+ *base = ntb->vpci_mw_phy[idx];
+
+ if (size)
+ *size = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static int vntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return 0;
+}
+
+static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4;
+ u32 val;
+ void __iomem *base = ntb->reg;
+
+ val = readl(base + off + ct + idx * 4);
+ return val;
+}
+
+static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset, ct = ctrl->spad_count * 4;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + ct + idx * 4);
+ return 0;
+}
+
+static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+ u32 val;
+
+ val = readl(base + off + idx * 4);
+ return val;
+}
+
+static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + idx * 4);
+ return 0;
+}
+
+static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
+{
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ u8 func_no, vfunc_no;
+ int ret;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_raise_irq(ntb->epf->epc,
+ func_no,
+ vfunc_no,
+ PCI_EPC_IRQ_MSI,
+ interrupt_num + 1);
+ if (ret)
+ dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
+
+ return ret;
+}
+
+static u64 vntb_epf_db_read(struct ntb_dev *ndev)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->db;
+}
+
+static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->reg->link_status;
+}
+
+static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ ntb->db &= ~db_bits;
+ return 0;
+}
+
+static int vntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ return 0;
+}
+
+static const struct ntb_dev_ops vntb_epf_ops = {
+ .mw_count = vntb_epf_mw_count,
+ .spad_count = vntb_epf_spad_count,
+ .peer_mw_count = vntb_epf_peer_mw_count,
+ .db_valid_mask = vntb_epf_db_valid_mask,
+ .db_set_mask = vntb_epf_db_set_mask,
+ .mw_set_trans = vntb_epf_mw_set_trans,
+ .mw_clear_trans = vntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
+ .link_enable = vntb_epf_link_enable,
+ .spad_read = vntb_epf_spad_read,
+ .spad_write = vntb_epf_spad_write,
+ .peer_spad_read = vntb_epf_peer_spad_read,
+ .peer_spad_write = vntb_epf_peer_spad_write,
+ .peer_db_set = vntb_epf_peer_db_set,
+ .db_read = vntb_epf_db_read,
+ .mw_get_align = vntb_epf_mw_get_align,
+ .link_is_up = vntb_epf_link_is_up,
+ .db_clear_mask = vntb_epf_db_clear_mask,
+ .db_clear = vntb_epf_db_clear,
+ .link_disable = vntb_epf_link_disable,
+};
+
+static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
+ struct device *dev = &pdev->dev;
+
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &vntb_epf_ops;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return -EINVAL;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ goto err_register_dev;
+ }
+
+ dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ return 0;
+
+err_register_dev:
+	return ret;
+}
+
+static struct pci_device_id pci_vntb_table[] = {
+ {
+ PCI_DEVICE(0xffff, 0xffff),
+ },
+ {},
+};
+
+static struct pci_driver vntb_pci_driver = {
+ .name = "pci-vntb",
+ .id_table = pci_vntb_table,
+ .probe = pci_vntb_probe,
+};
+
+/* ============ PCIe EPF Driver Bind ====================*/
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize the endpoint controller associated with the NTB function
+ * device. Invoked when the primary interface is bound to the EPC device.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
+ pci_vntb_table[0].vendor = ntb->vntb_vid;
+ pci_vntb_table[0].device = ntb->vntb_pid;
+
+ ret = pci_register_driver(&vntb_pci_driver);
+ if (ret) {
+		dev_err(dev, "failed to register vntb pci driver\n");
+ goto err_bar_alloc;
+ }
+
+ vpci_scan_bus(ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+
+ pci_unregister_driver(&vntb_pci_driver);
+}
+
+/*==================== PCIe EPF Driver probe ====================*/
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe NTB function driver when endpoint function bus detects a NTB
+ * endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ ntb->vbus_number = 0xff;
+ epf_set_drvdata(epf, ntb);
+
+ dev_info(dev, "pci-ep epf driver loaded\n");
+ return 0;
+}
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_vntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_vntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+					    WQ_HIGHPRI, 0);
+	if (!kpcintb_workqueue) {
+		pr_err("Failed to allocate the kpcintb work queue\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_epf_register_driver(&epf_ntb_driver);
+	if (ret) {
+		destroy_workqueue(kpcintb_workqueue);
+		pr_err("Failed to register pci epf ntb driver: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index b8c9011987f4..4504039056d1 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -13,27 +13,6 @@
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
-/*
- * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
- * pci_mmap_page_range() (if needed) as a wrapper round it.
- */
-
-#ifdef HAVE_PCI_MMAP
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
-
- /* Adjust vm_pgoff to be the offset within the resource */
- vma->vm_pgoff -= start >> PAGE_SHIFT;
- return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
- write_combine);
-}
-#endif
-
static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
@@ -70,27 +49,4 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
vma->vm_page_prot);
}
-#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
-
-/*
- * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
- * the architecture's pci_mmap_page_range(), converting to "user visible"
- * addresses as necessary.
- */
-
-int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- /*
- * pci_mmap_page_range() expects the same kind of entry as coming
- * from /proc/bus/pci/ which is a "user visible" value. If this is
- * different from the resource itself, arch will do necessary fixup.
- */
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
- vma->vm_pgoff += start >> PAGE_SHIFT;
- return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
-}
#endif
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 462b429ad243..4496a7c5c478 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
+#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,13 +21,6 @@
#include <linux/seq_buf.h>
#include <linux/xarray.h>
-enum pci_p2pdma_map_type {
- PCI_P2PDMA_MAP_UNKNOWN = 0,
- PCI_P2PDMA_MAP_NOT_SUPPORTED,
- PCI_P2PDMA_MAP_BUS_ADDR,
- PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
-};
-
struct pci_p2pdma {
struct gen_pool *pool;
bool p2pmem_published;
@@ -854,6 +848,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
struct pci_dev *client;
struct pci_p2pdma *p2pdma;
+ int dist;
if (!provider->p2pdma)
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
@@ -870,74 +865,48 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
type = xa_to_value(xa_load(&p2pdma->map_types,
map_types_idx(client)));
rcu_read_unlock();
- return type;
-}
-static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
- struct device *dev, struct scatterlist *sg, int nents)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
- sg_dma_len(s) = s->length;
- }
+ if (type == PCI_P2PDMA_MAP_UNKNOWN)
+ return calc_map_type_and_dist(provider, client, &dist, true);
- return nents;
+ return type;
}
/**
- * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: elements in the scatterlist
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_map_sg() (if called)
+ * pci_p2pdma_map_segment - map an sg segment determining the mapping type
+ * @state: State structure that should be declared outside of the for_each_sg()
+ * loop and initialized to zero.
+ * @dev: DMA device that's doing the mapping operation
+ * @sg: scatterlist segment to map
*
- * Scatterlists mapped with this function should be unmapped using
- * pci_p2pdma_unmap_sg_attrs().
+ * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
+ * the sg segment is the same for the page_link and the dma_address.
*
- * Returns the number of SG entries mapped or 0 on error.
+ * Attempt to map a single segment in an SGL with the PCI bus address.
+ * The segment must point to a PCI P2PDMA page and thus must be
+ * wrapped in an is_pci_p2pdma_page(sg_page(sg)) check.
+ *
+ * Returns the type of mapping used and maps the page if the type is
+ * PCI_P2PDMA_MAP_BUS_ADDR.
*/
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
{
- struct pci_p2pdma_pagemap *p2p_pgmap =
- to_p2p_pgmap(sg_page(sg)->pgmap);
-
- switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
- case PCI_P2PDMA_MAP_BUS_ADDR:
- return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
- default:
- WARN_ON_ONCE(1);
- return 0;
+ if (state->pgmap != sg_page(sg)->pgmap) {
+ state->pgmap = sg_page(sg)->pgmap;
+ state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
-}
-EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
-/**
- * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
- * mapped with pci_p2pdma_map_sg()
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: number of elements returned by pci_p2pdma_map_sg()
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
- */
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- enum pci_p2pdma_map_type map_type;
-
- map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);
+ if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
+ sg->dma_address = sg_phys(sg) + state->bus_off;
+ sg_dma_len(sg) = sg->length;
+ sg_dma_mark_bus_address(sg);
+ }
- if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
- dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
+ return state->map;
}
-EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
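To make the calling convention concrete, here is a sketch of how a non-IOMMU dma_map_sg() implementation might consume this helper (the example_map_sg() wrapper is illustrative and not part of this patch; the state structure, mapping types, and sg_dma_mark_bus_address() are the ones used here):

/* Illustrative sketch of the per-segment loop in a dma_map_sg() backend. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			switch (pci_p2pdma_map_segment(&p2pdma_state, dev,
						       sg)) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;	/* segment done: bus address set */
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				break;		/* map like normal memory below */
			default:
				return -EREMOTEIO;
			}
		}
		/* ... the backend's usual per-segment mapping goes here ... */
	}

	return nents;
}
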
/**
* pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3760d85c10d2..a46fec776ad7 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -21,8 +21,9 @@
#include "pci.h"
/*
- * The GUID is defined in the PCI Firmware Specification available here:
- * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
+ * The GUID is defined in the PCI Firmware Specification available
+ * here to PCI-SIG members:
+ * https://members.pcisig.com/wg/PCI-SIG/document/15350
*/
const guid_t pci_acpi_dsm_guid =
GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cfaf40a540a8..95bc329e74c0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -41,8 +41,10 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
+#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
+#endif
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
@@ -1293,9 +1295,6 @@ static int pci_set_full_power_state(struct pci_dev *dev)
pci_restore_bars(dev);
}
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
@@ -1390,9 +1389,6 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(dev->current_state),
pci_power_name(state));
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e10cdec6c56e..785f31086313 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -560,12 +560,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 7952e5efd6cf..e2d8a74f83c3 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -392,6 +392,11 @@ void pci_aer_init(struct pci_dev *dev)
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_aer_clear_status(dev);
+
+ if (pci_aer_available())
+ pci_enable_pcie_error_reporting(dev);
+
+ pcie_set_ecrc_checking(dev);
}
void pci_aer_exit(struct pci_dev *dev)
@@ -538,7 +543,7 @@ static const char *aer_agent_string[] = {
u64 *stats = pdev->aer_stats->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
@@ -1228,9 +1233,6 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data)
pci_disable_pcie_error_reporting(dev);
}
- if (enable)
- pcie_set_ecrc_checking(dev);
-
return 0;
}
@@ -1347,6 +1349,11 @@ static int aer_probe(struct pcie_device *dev)
struct device *device = &dev->device;
struct pci_dev *port = dev->port;
+ BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) <
+ AER_MAX_TYPEOF_COR_ERRS);
+ BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) <
+ AER_MAX_TYPEOF_UNCOR_ERRS);
+
/* Limit to Root Ports or Root Complex Event Collectors */
if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
(pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a96b7424c9bc..a8aec190986c 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1012,25 +1012,6 @@ out:
up_read(&pci_bus_sem);
}
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
- struct pcie_link_state *link = pdev->link_state;
-
- if (aspm_disabled || !link)
- return;
- /*
- * Devices changed PM state, we should recheck if latency
- * meets all functions' requirement
- */
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- pcie_update_aspm_capable(link->root);
- pcie_config_aspm_path(link);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
@@ -1366,4 +1347,3 @@ bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
-EXPORT_SYMBOL(pcie_aspm_support_enabled);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 0c5a143025af..59c90d04a609 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -55,10 +55,14 @@ static int report_error_detected(struct pci_dev *dev,
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pci_dev_set_io_state(dev, state) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->error_detected) {
+ if (pci_dev_is_disconnected(dev)) {
+ vote = PCI_ERS_RESULT_DISCONNECT;
+ } else if (!pci_dev_set_io_state(dev, state)) {
+ pci_info(dev, "can't recover (state transition %u -> %u invalid)\n",
+ dev->error_state, state);
+ vote = PCI_ERS_RESULT_NONE;
+ } else if (!pdrv || !pdrv->err_handler ||
+ !pdrv->err_handler->error_detected) {
/*
* If any device in the subtree does not have an error_detected
* callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 604feeb84ee4..1ac7fec47d6f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -222,15 +222,8 @@ static int get_port_device_capability(struct pci_dev *dev)
#ifdef CONFIG_PCIEAER
if (dev->aer_cap && pci_aer_available() &&
- (pcie_ports_native || host->native_aer)) {
+ (pcie_ports_native || host->native_aer))
services |= PCIE_PORT_SERVICE_AER;
-
- /*
- * Disable AER on this port in case it's been enabled by the
- * BIOS (the AER service driver will enable it when necessary).
- */
- pci_disable_pcie_error_reporting(dev);
- }
#endif
/* Root Ports and Root Complex Event Collectors may generate PMEs */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 17a969942d37..c5286b027f00 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1890,6 +1890,9 @@ int pci_setup_device(struct pci_dev *dev)
dev->broken_intx_masking = pci_intx_mask_broken(dev);
+ /* Clear errors left from system firmware */
+ pci_write_config_word(dev, PCI_STATUS, 0xffff);
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
@@ -2312,7 +2315,7 @@ EXPORT_SYMBOL(pci_alloc_dev);
static bool pci_bus_crs_vendor_id(u32 l)
{
- return (l & 0xffff) == 0x0001;
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
}
static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
@@ -2579,33 +2582,39 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned int next_fn(struct pci_bus *bus, struct pci_dev *dev,
- unsigned int fn)
+static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
{
int pos;
u16 cap = 0;
unsigned int next_fn;
- if (pci_ari_enabled(bus)) {
- if (!dev)
- return 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return 0;
+ if (!dev)
+ return -ENODEV;
- pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
- next_fn = PCI_ARI_CAP_NFN(cap);
- if (next_fn <= fn)
- return 0; /* protect against malformed list */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return -ENODEV;
- return next_fn;
- }
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return -ENODEV; /* protect against malformed list */
- /* dev may be NULL for non-contiguous multifunction devices */
- if (!dev || dev->multifunction)
- return (fn + 1) % 8;
+ return next_fn;
+}
- return 0;
+static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
+{
+ if (pci_ari_enabled(bus))
+ return next_ari_fn(bus, dev, fn);
+
+ if (fn >= 7)
+ return -ENODEV;
+ /* only multifunction devices may have more functions */
+ if (dev && !dev->multifunction)
+ return -ENODEV;
+
+ return fn + 1;
}
static int only_one_child(struct pci_bus *bus)
@@ -2643,26 +2652,30 @@ static int only_one_child(struct pci_bus *bus)
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
- unsigned int fn, nr = 0;
struct pci_dev *dev;
+ int fn = 0, nr = 0;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
- dev = pci_scan_single_device(bus, devfn);
- if (!dev)
- return 0;
- if (!pci_dev_is_added(dev))
- nr++;
-
- for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
+ do {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!pci_dev_is_added(dev))
nr++;
- dev->multifunction = 1;
+ if (fn > 0)
+ dev->multifunction = 1;
+ } else if (fn == 0) {
+ /*
+ * Function 0 is required unless we are running on
+ * a hypervisor that passes through individual PCI
+ * functions.
+ */
+ if (!hypervisor_isolated_pci_functions())
+ break;
}
- }
+ fn = next_fn(bus, dev, fn);
+ } while (fn >= 0);
/* Only one slot has PCIe device */
if (bus->self && nr)
@@ -2858,29 +2871,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
{
unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
unsigned int start = bus->busn_res.start;
- unsigned int devfn, fn, cmax, max = start;
+ unsigned int devfn, cmax, max = start;
struct pci_dev *dev;
- int nr_devs;
dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
- for (devfn = 0; devfn < 256; devfn += 8) {
- nr_devs = pci_scan_slot(bus, devfn);
-
- /*
- * The Jailhouse hypervisor may pass individual functions of a
- * multi-function device to a guest without passing function 0.
- * Look for them as well.
- */
- if (jailhouse_paravirt() && nr_devs == 0) {
- for (fn = 1; fn < 8; fn++) {
- dev = pci_scan_single_device(bus, devfn + fn);
- if (dev)
- dev->multifunction = 1;
- }
- }
- }
+ for (devfn = 0; devfn < 256; devfn += 8)
+ pci_scan_slot(bus, devfn);
/* Reserve buses for SR-IOV capability */
used_buses = pci_iov_bus_range(bus);
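
The slot loop advances devfn in steps of 8 because a devfn packs the device (slot) number in bits 7:3 and the function number in bits 2:0, so each slot spans eight consecutive devfn values. The standard accessors make the encoding explicit:

    #include <linux/pci.h>

    unsigned int devfn = PCI_DEVFN(3, 0);   /* (3 << 3) | 0 == 24 */
    unsigned int slot  = PCI_SLOT(devfn);   /* 3 */
    unsigned int fn    = PCI_FUNC(devfn);   /* 0 */
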
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 31b26d8ea6cc..f967709082d6 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -244,6 +244,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = pde_data(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
+ resource_size_t start, end;
int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
if (!capable(CAP_SYS_RAWIO) ||
@@ -278,7 +279,11 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
iomem_is_exclusive(dev->resource[i].start))
return -EINVAL;
- ret = pci_mmap_page_range(dev, i, vma,
+ pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
+
+ /* Adjust vm_pgoff to be the offset within the resource */
+ vma->vm_pgoff -= start >> PAGE_SHIFT;
+ ret = pci_mmap_resource_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
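
pci_mmap_resource_range() expects vm_pgoff to be an offset into the BAR, whereas userspace mmaps the proc file using the user-visible resource address, so the handler translates between the two views. A worked example of the adjustment, assuming a BAR whose user-visible start is 0xfe000000:

    /*
     * Userspace calls mmap() with offset 0xfe000000, so on entry
     * vm_pgoff == 0xfe000000 >> PAGE_SHIFT. Subtracting the resource
     * start leaves vm_pgoff == 0, i.e. "beginning of BAR i", which is
     * the form pci_mmap_resource_range() expects.
     */
    vma->vm_pgoff -= start >> PAGE_SHIFT;
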
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 41aeaa235132..4944798e75b5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
+#include <linux/isa-dma.h> /* isa_dma_bridge_buggy */
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -30,7 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/switchtec.h>
-#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -239,6 +239,7 @@ static void quirk_passive_release(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
+#ifdef CONFIG_X86_32
/*
* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
* workaround but VIA don't answer queries. If you happen to have good
@@ -265,6 +266,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
+#endif
/*
* Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
@@ -2709,10 +2711,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
nvenet_msi_disable);
/*
- * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled,
- * then the device can't use INTx interrupts. Tegra's PCIe root ports don't
- * generate MSI interrupts for PME and AER events instead only INTx interrupts
- * are generated. Though Tegra's PCIe root ports can generate MSI interrupts
+ * PCIe spec r6.0 sec 6.1.4.3 says that if MSI/MSI-X is enabled, the device
+ * can't use INTx interrupts. Tegra's PCIe Root Ports don't generate MSI
+ * interrupts for PME and AER events; instead only INTx interrupts are
+ * generated. Though Tegra's PCIe Root Ports can generate MSI interrupts
* for other events, since the PCIe specification doesn't support using a mix
* of INTx and MSI/MSI-X, MSI interrupts must be disabled to avoid the port
* service drivers registering their respective ISRs for MSIs.
@@ -2760,6 +2762,15 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
PCI_CLASS_BRIDGE_PCI, 8,
pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229a,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229c,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229e,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
/*
* Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
@@ -4924,6 +4935,9 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
/* Broadcom multi-function device */
{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index c36c1238c604..75be4fe22509 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1376,8 +1376,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
dev->groups = switchtec_device_groups;
dev->release = stdev_release;
- minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
- GFP_KERNEL);
+ minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
if (minor < 0) {
rc = minor;
goto err_put;
@@ -1692,7 +1691,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
err_put:
- ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
return rc;
}
@@ -1704,7 +1703,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
cdev_device_del(&stdev->cdev, &stdev->dev);
- ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
stdev_kill(stdev);
put_device(&stdev->dev);
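
ida_simple_get() and ida_simple_remove() are deprecated aliases; with no range constraints, ida_alloc(ida, gfp) is equivalent to ida_simple_get(ida, 0, 0, gfp) and hands out the lowest free ID starting from 0. A minimal sketch of the replacement API:

    #include <linux/idr.h>

    static DEFINE_IDA(example_minor_ida);	/* hypothetical IDA for illustration */

    static int example_get_minor(void)
    {
            int minor = ida_alloc(&example_minor_ida, GFP_KERNEL);

            if (minor < 0)
                    return minor;           /* -ENOMEM or -ENOSPC */
            return minor;
    }

    static void example_put_minor(int minor)
    {
            ida_free(&example_minor_ida, minor);
    }
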
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 2c961839903d..ebca5eab9c9b 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -170,7 +170,6 @@ int riscv_pmu_event_set_period(struct perf_event *event)
left = (max_period >> 1);
local64_set(&hwc->prev_count, (u64)-left);
- perf_event_update_userpage(event);
return overflow;
}
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 342778782359..2c20b0de8cb0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -72,7 +72,7 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
-/**
+/*
* This is just a simple implementation to make legacy platforms
* compatible with the new RISC-V PMU driver framework.
* This driver only allows reading two counters, i.e. CYCLE & INSTRET.
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 79a3de515ece..6f6681bbfd36 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -41,20 +41,6 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
NULL,
};
-union sbi_pmu_ctr_info {
- unsigned long value;
- struct {
- unsigned long csr:12;
- unsigned long width:6;
-#if __riscv_xlen == 32
- unsigned long reserved:13;
-#else
- unsigned long reserved:45;
-#endif
- unsigned long type:1;
- };
-};
-
/*
* RISC-V doesn't have heterogeneous harts yet. This needs to be part of
* per_cpu in case of harts with different pmu counters
@@ -294,8 +280,13 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
cflags |= SBI_PMU_CFG_FLAG_SET_UINH;
/* retrieve the available counter index */
+#if defined(CONFIG_32BIT)
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
+ cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
cflags, hwc->event_base, hwc->config, 0);
+#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
hwc->event_base, hwc->config);
@@ -437,8 +428,13 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
struct hw_perf_event *hwc = &event->hw;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
+#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, ival >> 32, 0);
+#else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
+ 1, flag, ival, 0, 0);
+#endif
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
@@ -545,8 +541,14 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
hwc = &event->hw;
max_period = riscv_pmu_ctr_get_width_mask(event);
init_val = local64_read(&hwc->prev_count) & max_period;
+#if defined(CONFIG_32BIT)
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
+ flag, init_val, init_val >> 32, 0);
+#else
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
flag, init_val, 0, 0);
+#endif
+ perf_event_update_userpage(event);
}
ctr_ovf_mask = ctr_ovf_mask >> 1;
idx++;
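
On rv32 every SBI argument register is XLEN (32 bits) wide, so a 64-bit counter value must be split into low and high words across two arguments; on rv64 it fits in one. The three #if blocks above all follow the same shape, restated here in one place:

    static void example_sbi_start(unsigned int idx, u64 ival)
    {
    #if defined(CONFIG_32BIT)
            /* rv32: split the 64-bit value across two 32-bit registers */
            sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
                      SBI_PMU_START_FLAG_SET_INIT_VALUE,
                      (unsigned long)ival, (unsigned long)(ival >> 32), 0);
    #else
            /* rv64: the value fits in a single register */
            sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
                      SBI_PMU_START_FLAG_SET_INIT_VALUE, ival, 0, 0);
    #endif
    }
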
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 578cfe07d07a..53c9230c2907 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -51,6 +51,13 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
+ regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
+ BIT(0), 1);
+ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
+ PCIE_APP_REQ_EXIT_L1_MODE, 0);
+ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
+ PCIE_REFCLK_GATING_EN, 0);
+
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
PCIE_PHY_RESET, 1);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
@@ -109,20 +116,7 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
return 0;
}
-static int exynos5433_pcie_phy_power_on(struct phy *phy)
-{
- struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
-
- regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
- BIT(0), 1);
- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
- PCIE_APP_REQ_EXIT_L1_MODE, 0);
- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
- PCIE_REFCLK_GATING_EN, 0);
- return 0;
-}
-
-static int exynos5433_pcie_phy_power_off(struct phy *phy)
+static int exynos5433_pcie_phy_exit(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
@@ -135,8 +129,7 @@ static int exynos5433_pcie_phy_power_off(struct phy *phy)
static const struct phy_ops exynos5433_phy_ops = {
.init = exynos5433_pcie_phy_init,
- .power_on = exynos5433_pcie_phy_power_on,
- .power_off = exynos5433_pcie_phy_power_off,
+ .exit = exynos5433_pcie_phy_exit,
.owner = THIS_MODULE,
};
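
Folding the PMU isolation and clock-gating writes into .init/.exit matches the generic PHY consumer sequence, where phy_init() runs once before phy_power_on() and phy_exit() after phy_power_off(). A consumer-side sketch of that ordering:

    #include <linux/phy/phy.h>

    static int example_bring_up(struct phy *phy)
    {
            int ret;

            ret = phy_init(phy);	/* one-time setup; now does the PMU writes */
            if (ret)
                    return ret;

            ret = phy_power_on(phy);
            if (ret)
                    phy_exit(phy);
            return ret;
    }
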
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index bff144c97e66..1cf74b0c42e5 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -311,7 +311,7 @@ config PINCTRL_MICROCHIP_SGPIO
LED controller.
config PINCTRL_OCELOT
- bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
+ tristate "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
depends on HAS_IOMEM
select GPIOLIB
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 4d7548686f39..aaa78a613196 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -632,7 +632,7 @@ struct aspeed_pin_desc {
SIG_EXPR_LIST_ALIAS(pin, sig, group)
/**
- * Similar to the above, but for pins with a dual expressions (DE) and
+ * Similar to the above, but for pins with dual expressions (DE)
* and a single group (SG) of pins.
*
* @pin: The pin the signal will be routed to
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index dad453054776..7857e612a100 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -507,7 +507,7 @@ static void bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
}
}
-static void bcm2835_gpio_irq_enable(struct irq_data *data)
+static void bcm2835_gpio_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -516,13 +516,15 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
unsigned bank = GPIO_REG_OFFSET(gpio);
unsigned long flags;
+ gpiochip_enable_irq(chip, gpio);
+
raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
set_bit(offset, &pc->enabled_irq_map[bank]);
bcm2835_gpio_irq_config(pc, gpio, true);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
-static void bcm2835_gpio_irq_disable(struct irq_data *data)
+static void bcm2835_gpio_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -537,6 +539,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+
+ gpiochip_disable_irq(chip, gpio);
}
static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
@@ -693,16 +697,15 @@ static int bcm2835_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
return ret;
}
-static struct irq_chip bcm2835_gpio_irq_chip = {
+static const struct irq_chip bcm2835_gpio_irq_chip = {
.name = MODULE_NAME,
- .irq_enable = bcm2835_gpio_irq_enable,
- .irq_disable = bcm2835_gpio_irq_disable,
.irq_set_type = bcm2835_gpio_irq_set_type,
.irq_ack = bcm2835_gpio_irq_ack,
- .irq_mask = bcm2835_gpio_irq_disable,
- .irq_unmask = bcm2835_gpio_irq_enable,
+ .irq_mask = bcm2835_gpio_irq_mask,
+ .irq_unmask = bcm2835_gpio_irq_unmask,
.irq_set_wake = bcm2835_gpio_irq_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .flags = (IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE),
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
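
A const irq_chip flagged IRQCHIP_IMMUTABLE is no longer copied and patched by gpiolib, so the driver takes over the bookkeeping gpiolib used to do: bracket unmask/mask with gpiochip_enable_irq()/gpiochip_disable_irq(), add GPIOCHIP_IRQ_RESOURCE_HELPERS to the chip, and register it with gpio_irq_chip_set_chip(). A condensed sketch of the pattern, with hypothetical callbacks:

    static void example_irq_unmask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            gpiochip_enable_irq(gc, irqd_to_hwirq(d));
            /* hardware-specific unmask goes here */
    }

    static void example_irq_mask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            /* hardware-specific mask goes here */
            gpiochip_disable_irq(gc, irqd_to_hwirq(d));
    }

    static const struct irq_chip example_irq_chip = {
            .name           = "example",
            .irq_mask       = example_irq_mask,
            .irq_unmask     = example_irq_unmask,
            .flags          = IRQCHIP_IMMUTABLE,
            GPIOCHIP_IRQ_RESOURCE_HELPERS,
    };
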
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
@@ -1280,7 +1283,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
girq = &pc->gpio_chip.irq;
- girq->chip = &bcm2835_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &bcm2835_gpio_irq_chip);
girq->parent_handler = bcm2835_gpio_irq_handler;
girq->num_parents = BCM2835_NUM_IRQS;
girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ffe39336fcac..9e57f4c62e60 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -126,7 +126,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np)
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node)
- if (pctldev->dev->of_node == np) {
+ if (device_match_of_node(pctldev->dev, np)) {
mutex_unlock(&pinctrldev_list_mutex);
return pctldev;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx93.c b/drivers/pinctrl/freescale/pinctrl-imx93.c
index 417e41b37a6f..91b3ee1e6fa9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx93.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx93.c
@@ -247,6 +247,7 @@ static const struct of_device_id imx93_pinctrl_of_match[] = {
{ .compatible = "fsl,imx93-iomuxc", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx93_pinctrl_of_match);
static int imx93_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index e5ec8b8956da..078eec8af4a4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -151,6 +151,14 @@ config PINCTRL_LEWISBURG
This pinctrl driver provides an interface that allows configuring
of Intel Lewisburg pins and using them as GPIOs.
+config PINCTRL_METEORLAKE
+ tristate "Intel Meteor Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Meteor Lake pins and using them as GPIOs.
+
config PINCTRL_SUNRISEPOINT
tristate "Intel Sunrisepoint pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 181ffcf34d62..bb87e7bc7b20 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -18,5 +18,6 @@ obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
obj-$(CONFIG_PINCTRL_LAKEFIELD) += pinctrl-lakefield.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
+obj-$(CONFIG_PINCTRL_METEORLAKE) += pinctrl-meteorlake.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 31f8f271628c..67db79f38051 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -603,7 +603,7 @@ static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc->groups[selector].name;
+ return vg->soc->groups[selector].grp.name;
}
static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -613,8 +613,8 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *pins = vg->soc->groups[selector].pins;
- *num_pins = vg->soc->groups[selector].npins;
+ *pins = vg->soc->groups[selector].grp.pins;
+ *num_pins = vg->soc->groups[selector].grp.npins;
return 0;
}
@@ -662,15 +662,15 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
@@ -692,15 +692,15 @@ static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 26b2a425d201..5c4fd16e5b01 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -627,7 +627,7 @@ static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -635,8 +635,8 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -721,16 +721,16 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&chv_lock, flags);
/* Check first that the pad is not locked */
- for (i = 0; i < grp->npins; i++) {
- if (chv_pad_locked(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (chv_pad_locked(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&chv_lock, flags);
- dev_warn(dev, "unable to set mode for locked pin %u\n", grp->pins[i]);
+ dev_warn(dev, "unable to set mode for locked pin %u\n", grp->grp.pins[i]);
return -EBUSY;
}
}
- for (i = 0; i < grp->npins; i++) {
- int pin = grp->pins[i];
+ for (i = 0; i < grp->grp.npins; i++) {
+ int pin = grp->grp.pins[i];
unsigned int mode;
bool invert_oe;
u32 value;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index ffc045f7bf00..52ecd66ce357 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -279,7 +279,7 @@ static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -287,8 +287,8 @@ static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -391,19 +391,19 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the group need to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!intel_pad_usable(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!intel_pad_usable(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
}
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
+ padcfg0 = intel_get_padcfg(pctrl, grp->grp.pins[i], PADCFG0);
value = readl(padcfg0);
value &= ~PADCFG0_PMODE_MASK;
@@ -1641,16 +1641,14 @@ EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
{
+ const struct intel_pinctrl_soc_data * const *table;
const struct intel_pinctrl_soc_data *data = NULL;
- const struct intel_pinctrl_soc_data **table;
- struct acpi_device *adev;
- unsigned int i;
- adev = ACPI_COMPANION(&pdev->dev);
- if (adev) {
- const void *match = device_get_match_data(&pdev->dev);
+ table = device_get_match_data(&pdev->dev);
+ if (table) {
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ unsigned int i;
- table = (const struct intel_pinctrl_soc_data **)match;
for (i = 0; table[i]; i++) {
if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
data = table[i];
@@ -1664,7 +1662,7 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
if (!id)
return ERR_PTR(-ENODEV);
- table = (const struct intel_pinctrl_soc_data **)id->driver_data;
+ table = (const struct intel_pinctrl_soc_data * const *)id->driver_data;
data = table[pdev->id];
}
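
device_get_match_data() returns the driver_data of whichever firmware match-table entry fired, for ACPI and OF alike, which is what lets the rework drop the explicit ACPI_COMPANION() lookup and cast. Schematically, with a hypothetical table:

    struct example_soc_data { int dummy; };	/* hypothetical */

    static const struct example_soc_data example_soc_a;

    static const struct acpi_device_id example_acpi_match[] = {
            { "ABCD0001", (kernel_ulong_t)&example_soc_a },
            { }
    };

    static int example_probe(struct platform_device *pdev)
    {
            const struct example_soc_data *data;

            /* driver_data of the matched entry, or NULL if none */
            data = device_get_match_data(&pdev->dev);
            if (!data)
                    return -ENODEV;
            return 0;
    }
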
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 710341bb67cc..65628423bf63 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -24,17 +24,12 @@ struct device;
/**
* struct intel_pingroup - Description about group of pins
- * @name: Name of the groups
- * @pins: All pins in this group
- * @npins: Number of pins in this groups
- * @mode: Native mode in which the group is muxed out @pins. Used if @modes
- * is %NULL.
+ * @grp: Generic data of the pin group (name and pins)
+ * @mode: Native mode in which the group is muxed out @pins. Used if @modes is %NULL.
* @modes: If not %NULL this will hold mode for each pin in @pins
*/
struct intel_pingroup {
- const char *name;
- const unsigned int *pins;
- size_t npins;
+ struct pingroup grp;
unsigned short mode;
const unsigned int *modes;
};
@@ -156,15 +151,11 @@ struct intel_community {
* a single integer or an array of integers in which case mode is per
* pin.
*/
-#define PIN_GROUP(n, p, m) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .mode = __builtin_choose_expr( \
- __builtin_constant_p((m)), (m), 0), \
- .modes = __builtin_choose_expr( \
- __builtin_constant_p((m)), NULL, (m)), \
+#define PIN_GROUP(n, p, m) \
+ { \
+ .grp = PINCTRL_PINGROUP((n), (p), ARRAY_SIZE((p))), \
+ .mode = __builtin_choose_expr(__builtin_constant_p((m)), (m), 0), \
+ .modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
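
PINCTRL_PINGROUP() is the generic initializer from <linux/pinctrl/pinctrl.h> that fills a struct pingroup (name, pin array, count), which is why every driver in this series now reaches those fields through .grp. A sketch of one group built with the rewritten macro's expansion:

    #include <linux/pinctrl/pinctrl.h>

    static const unsigned int example_spi_pins[] = { 10, 11, 12, 13 };

    /* What PIN_GROUP("spi_grp", example_spi_pins, 1) now boils down to: */
    static const struct intel_pingroup example_group = {
            .grp  = PINCTRL_PINGROUP("spi_grp", example_spi_pins,
                                     ARRAY_SIZE(example_spi_pins)),
            .mode = 1,
    };
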
#define FUNCTION(n, g) \
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 4fb39eb30902..5d1abee30f8f 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -282,7 +282,7 @@ static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- return lg->soc->groups[selector].name;
+ return lg->soc->groups[selector].grp.name;
}
static int lp_get_group_pins(struct pinctrl_dev *pctldev,
@@ -292,8 +292,8 @@ static int lp_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- *pins = lg->soc->groups[selector].pins;
- *num_pins = lg->soc->groups[selector].npins;
+ *pins = lg->soc->groups[selector].grp.pins;
+ *num_pins = lg->soc->groups[selector].grp.npins;
return 0;
}
@@ -366,8 +366,8 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&lg->lock, flags);
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
- void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+ for (i = 0; i < grp->grp.npins; i++) {
+ void __iomem *reg = lp_gpio_reg(&lg->chip, grp->grp.pins[i], LP_CONFIG1);
u32 value;
value = ioread32(reg);
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 3ae141e0b421..5e752818adb4 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -520,7 +520,7 @@ static const char *mrfld_get_group_name(struct pinctrl_dev *pctldev,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- return mp->groups[group].name;
+ return mp->groups[group].grp.name;
}
static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -528,8 +528,8 @@ static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- *pins = mp->groups[group].pins;
- *npins = mp->groups[group].npins;
+ *pins = mp->groups[group].grp.pins;
+ *npins = mp->groups[group].grp.npins;
return 0;
}
@@ -604,15 +604,15 @@ static int mrfld_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the group need to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!mrfld_buf_available(mp, grp->pins[i]))
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!mrfld_buf_available(mp, grp->grp.pins[i]))
return -EBUSY;
}
/* Now enable the mux setting for each pin in the group */
raw_spin_lock_irqsave(&mp->lock, flags);
- for (i = 0; i < grp->npins; i++)
- mrfld_update_bufcfg(mp, grp->pins[i], bits, mask);
+ for (i = 0; i < grp->grp.npins; i++)
+ mrfld_update_bufcfg(mp, grp->grp.pins[i], bits, mask);
raw_spin_unlock_irqrestore(&mp->lock, flags);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
new file mode 100644
index 000000000000..9576dcd1cb29
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Meteor Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2022, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define MTL_PAD_OWN 0x0b0
+#define MTL_PADCFGLOCK 0x110
+#define MTL_HOSTSW_OWN 0x140
+#define MTL_GPI_IS 0x200
+#define MTL_GPI_IE 0x210
+
+#define MTL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define MTL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = MTL_PAD_OWN, \
+ .padcfglock_offset = MTL_PADCFGLOCK, \
+ .hostown_offset = MTL_HOSTSW_OWN, \
+ .is_offset = MTL_GPI_IS, \
+ .ie_offset = MTL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Meteor Lake-P */
+static const struct pinctrl_pin_desc mtlp_pins[] = {
+ /* CPU */
+ PINCTRL_PIN(0, "PECI"),
+ PINCTRL_PIN(1, "UFS_RESET_B"),
+ PINCTRL_PIN(2, "VIDSOUT"),
+ PINCTRL_PIN(3, "VIDSCK"),
+ PINCTRL_PIN(4, "VIDALERT_B"),
+ /* GPP_V */
+ PINCTRL_PIN(5, "BATLOW_B"),
+ PINCTRL_PIN(6, "AC_PRESENT"),
+ PINCTRL_PIN(7, "SOC_WAKE_B"),
+ PINCTRL_PIN(8, "PWRBTN_B"),
+ PINCTRL_PIN(9, "SLP_S3_B"),
+ PINCTRL_PIN(10, "SLP_S4_B"),
+ PINCTRL_PIN(11, "SLP_A_B"),
+ PINCTRL_PIN(12, "GPP_V_7"),
+ PINCTRL_PIN(13, "SUSCLK"),
+ PINCTRL_PIN(14, "SLP_WLAN_B"),
+ PINCTRL_PIN(15, "SLP_S5_B"),
+ PINCTRL_PIN(16, "LANPHYPC"),
+ PINCTRL_PIN(17, "SLP_LAN_B"),
+ PINCTRL_PIN(18, "GPP_V_13"),
+ PINCTRL_PIN(19, "WAKE_B"),
+ PINCTRL_PIN(20, "GPP_V_15"),
+ PINCTRL_PIN(21, "GPP_V_16"),
+ PINCTRL_PIN(22, "GPP_V_17"),
+ PINCTRL_PIN(23, "GPP_V_18"),
+ PINCTRL_PIN(24, "CATERR_B"),
+ PINCTRL_PIN(25, "PROCHOT_B"),
+ PINCTRL_PIN(26, "THERMTRIP_B"),
+ PINCTRL_PIN(27, "DSI_DE_TE_2_GENLOCK_REF"),
+ PINCTRL_PIN(28, "DSI_DE_TE_1_DISP_UTILS"),
+ /* GPP_C */
+ PINCTRL_PIN(29, "SMBCLK"),
+ PINCTRL_PIN(30, "SMBDATA"),
+ PINCTRL_PIN(31, "SMBALERT_B"),
+ PINCTRL_PIN(32, "SML0CLK"),
+ PINCTRL_PIN(33, "SML0DATA"),
+ PINCTRL_PIN(34, "GPP_C_5"),
+ PINCTRL_PIN(35, "GPP_C_6"),
+ PINCTRL_PIN(36, "GPP_C_7"),
+ PINCTRL_PIN(37, "GPP_C_8"),
+ PINCTRL_PIN(38, "GPP_C_9"),
+ PINCTRL_PIN(39, "GPP_C_10"),
+ PINCTRL_PIN(40, "GPP_C_11"),
+ PINCTRL_PIN(41, "GPP_C_12"),
+ PINCTRL_PIN(42, "GPP_C_13"),
+ PINCTRL_PIN(43, "GPP_C_14"),
+ PINCTRL_PIN(44, "GPP_C_15"),
+ PINCTRL_PIN(45, "GPP_C_16"),
+ PINCTRL_PIN(46, "GPP_C_17"),
+ PINCTRL_PIN(47, "GPP_C_18"),
+ PINCTRL_PIN(48, "GPP_C_19"),
+ PINCTRL_PIN(49, "GPP_C_20"),
+ PINCTRL_PIN(50, "GPP_C_21"),
+ PINCTRL_PIN(51, "GPP_C_22"),
+ PINCTRL_PIN(52, "GPP_C_23"),
+ /* GPP_A */
+ PINCTRL_PIN(53, "ESPI_IO_0"),
+ PINCTRL_PIN(54, "ESPI_IO_1"),
+ PINCTRL_PIN(55, "ESPI_IO_2"),
+ PINCTRL_PIN(56, "ESPI_IO_3"),
+ PINCTRL_PIN(57, "ESPI_CS0_B"),
+ PINCTRL_PIN(58, "ESPI_CLK"),
+ PINCTRL_PIN(59, "ESPI_RESET_B"),
+ PINCTRL_PIN(60, "GPP_A_7"),
+ PINCTRL_PIN(61, "GPP_A_8"),
+ PINCTRL_PIN(62, "GPP_A_9"),
+ PINCTRL_PIN(63, "GPP_A_10"),
+ PINCTRL_PIN(64, "GPP_A_11"),
+ PINCTRL_PIN(65, "GPP_A_12"),
+ PINCTRL_PIN(66, "ESPI_CS1_B"),
+ PINCTRL_PIN(67, "ESPI_CS2_B"),
+ PINCTRL_PIN(68, "ESPI_CS3_B"),
+ PINCTRL_PIN(69, "ESPI_ALERT0_B"),
+ PINCTRL_PIN(70, "ESPI_ALERT1_B"),
+ PINCTRL_PIN(71, "ESPI_ALERT2_B"),
+ PINCTRL_PIN(72, "ESPI_ALERT3_B"),
+ PINCTRL_PIN(73, "GPP_A_20"),
+ PINCTRL_PIN(74, "GPP_A_21"),
+ PINCTRL_PIN(75, "GPP_A_22"),
+ PINCTRL_PIN(76, "GPP_A_23"),
+ PINCTRL_PIN(77, "ESPI_CLK_LOOPBK"),
+ /* GPP_E */
+ PINCTRL_PIN(78, "GPP_E_0"),
+ PINCTRL_PIN(79, "GPP_E_1"),
+ PINCTRL_PIN(80, "GPP_E_2"),
+ PINCTRL_PIN(81, "GPP_E_3"),
+ PINCTRL_PIN(82, "GPP_E_4"),
+ PINCTRL_PIN(83, "GPP_E_5"),
+ PINCTRL_PIN(84, "GPP_E_6"),
+ PINCTRL_PIN(85, "GPP_E_7"),
+ PINCTRL_PIN(86, "GPP_E_8"),
+ PINCTRL_PIN(87, "GPP_E_9"),
+ PINCTRL_PIN(88, "GPP_E_10"),
+ PINCTRL_PIN(89, "GPP_E_11"),
+ PINCTRL_PIN(90, "GPP_E_12"),
+ PINCTRL_PIN(91, "GPP_E_13"),
+ PINCTRL_PIN(92, "GPP_E_14"),
+ PINCTRL_PIN(93, "SLP_DRAM_B"),
+ PINCTRL_PIN(94, "GPP_E_16"),
+ PINCTRL_PIN(95, "GPP_E_17"),
+ PINCTRL_PIN(96, "GPP_E_18"),
+ PINCTRL_PIN(97, "GPP_E_19"),
+ PINCTRL_PIN(98, "GPP_E_20"),
+ PINCTRL_PIN(99, "GPP_E_21"),
+ PINCTRL_PIN(100, "DNX_FORCE_RELOAD"),
+ PINCTRL_PIN(101, "GPP_E_23"),
+ PINCTRL_PIN(102, "THC0_GSPI0_CLK_LOOPBK"),
+ /* GPP_H */
+ PINCTRL_PIN(103, "GPP_H_0"),
+ PINCTRL_PIN(104, "GPP_H_1"),
+ PINCTRL_PIN(105, "GPP_H_2"),
+ PINCTRL_PIN(106, "GPP_H_3"),
+ PINCTRL_PIN(107, "GPP_H_4"),
+ PINCTRL_PIN(108, "GPP_H_5"),
+ PINCTRL_PIN(109, "GPP_H_6"),
+ PINCTRL_PIN(110, "GPP_H_7"),
+ PINCTRL_PIN(111, "GPP_H_8"),
+ PINCTRL_PIN(112, "GPP_H_9"),
+ PINCTRL_PIN(113, "GPP_H_10"),
+ PINCTRL_PIN(114, "GPP_H_11"),
+ PINCTRL_PIN(115, "GPP_H_12"),
+ PINCTRL_PIN(116, "CPU_C10_GATE_B"),
+ PINCTRL_PIN(117, "GPP_H_14"),
+ PINCTRL_PIN(118, "GPP_H_15"),
+ PINCTRL_PIN(119, "GPP_H_16"),
+ PINCTRL_PIN(120, "GPP_H_17"),
+ PINCTRL_PIN(121, "GPP_H_18"),
+ PINCTRL_PIN(122, "GPP_H_19"),
+ PINCTRL_PIN(123, "GPP_H_20"),
+ PINCTRL_PIN(124, "GPP_H_21"),
+ PINCTRL_PIN(125, "GPP_H_22"),
+ PINCTRL_PIN(126, "GPP_H_23"),
+ PINCTRL_PIN(127, "LPI3C1_CLK_LOOPBK"),
+ PINCTRL_PIN(128, "I3C0_CLK_LOOPBK"),
+ /* GPP_F */
+ PINCTRL_PIN(129, "CNV_BRI_DT"),
+ PINCTRL_PIN(130, "CNV_BRI_RSP"),
+ PINCTRL_PIN(131, "CNV_RGI_DT"),
+ PINCTRL_PIN(132, "CNV_RGI_RSP"),
+ PINCTRL_PIN(133, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(134, "CRF_CLKREQ"),
+ PINCTRL_PIN(135, "GPP_F_6"),
+ PINCTRL_PIN(136, "FUSA_DIAGTEST_EN"),
+ PINCTRL_PIN(137, "FUSA_DIAGTEST_MODE"),
+ PINCTRL_PIN(138, "BOOTMPC"),
+ PINCTRL_PIN(139, "GPP_F_10"),
+ PINCTRL_PIN(140, "GPP_F_11"),
+ PINCTRL_PIN(141, "GSXDOUT"),
+ PINCTRL_PIN(142, "GSXSLOAD"),
+ PINCTRL_PIN(143, "GSXDIN"),
+ PINCTRL_PIN(144, "GSXSRESETB"),
+ PINCTRL_PIN(145, "GSXCLK"),
+ PINCTRL_PIN(146, "GMII_MDC_0"),
+ PINCTRL_PIN(147, "GMII_MDIO_0"),
+ PINCTRL_PIN(148, "GPP_F_19"),
+ PINCTRL_PIN(149, "GPP_F_20"),
+ PINCTRL_PIN(150, "GPP_F_21"),
+ PINCTRL_PIN(151, "GPP_F_22"),
+ PINCTRL_PIN(152, "GPP_F_23"),
+ PINCTRL_PIN(153, "THC1_GSPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(154, "GSPI0A_CLK_LOOPBK"),
+ /* SPI0 */
+ PINCTRL_PIN(155, "SPI0_IO_2"),
+ PINCTRL_PIN(156, "SPI0_IO_3"),
+ PINCTRL_PIN(157, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(158, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(159, "SPI0_TPM_CS_B"),
+ PINCTRL_PIN(160, "SPI0_FLASH_0_CS_B"),
+ PINCTRL_PIN(161, "SPI0_FLASH_1_CS_B"),
+ PINCTRL_PIN(162, "SPI0_CLK"),
+ PINCTRL_PIN(163, "L_BKLTEN"),
+ PINCTRL_PIN(164, "L_BKLTCTL"),
+ PINCTRL_PIN(165, "L_VDDEN"),
+ PINCTRL_PIN(166, "SYS_PWROK"),
+ PINCTRL_PIN(167, "SYS_RESET_B"),
+ PINCTRL_PIN(168, "MLK_RST_B"),
+ PINCTRL_PIN(169, "SPI0_CLK_LOOPBK"),
+ /* vGPIO_3 */
+ PINCTRL_PIN(170, "ESPI_USB_OCB_0"),
+ PINCTRL_PIN(171, "ESPI_USB_OCB_1"),
+ PINCTRL_PIN(172, "ESPI_USB_OCB_2"),
+ PINCTRL_PIN(173, "ESPI_USB_OCB_3"),
+ PINCTRL_PIN(174, "USB_CPU_OCB_0"),
+ PINCTRL_PIN(175, "USB_CPU_OCB_1"),
+ PINCTRL_PIN(176, "USB_CPU_OCB_2"),
+ PINCTRL_PIN(177, "USB_CPU_OCB_3"),
+ PINCTRL_PIN(178, "TS0_IN_INT"),
+ PINCTRL_PIN(179, "TS1_IN_INT"),
+ PINCTRL_PIN(180, "THC0_WOT_INT"),
+ PINCTRL_PIN(181, "THC1_WOT_INT"),
+ PINCTRL_PIN(182, "THC0_WHC_INT"),
+ PINCTRL_PIN(183, "THC1_WHC_INT"),
+ /* GPP_S */
+ PINCTRL_PIN(184, "GPP_S_0"),
+ PINCTRL_PIN(185, "GPP_S_1"),
+ PINCTRL_PIN(186, "GPP_S_2"),
+ PINCTRL_PIN(187, "GPP_S_3"),
+ PINCTRL_PIN(188, "GPP_S_4"),
+ PINCTRL_PIN(189, "GPP_S_5"),
+ PINCTRL_PIN(190, "GPP_S_6"),
+ PINCTRL_PIN(191, "GPP_S_7"),
+ /* JTAG */
+ PINCTRL_PIN(192, "JTAG_MBPB0"),
+ PINCTRL_PIN(193, "JTAG_MBPB1"),
+ PINCTRL_PIN(194, "JTAG_MBPB2"),
+ PINCTRL_PIN(195, "JTAG_MBPB3"),
+ PINCTRL_PIN(196, "JTAG_TDO"),
+ PINCTRL_PIN(197, "PRDY_B"),
+ PINCTRL_PIN(198, "PREQ_B"),
+ PINCTRL_PIN(199, "JTAG_TDI"),
+ PINCTRL_PIN(200, "JTAG_TMS"),
+ PINCTRL_PIN(201, "JTAG_TCK"),
+ PINCTRL_PIN(202, "DBG_PMODE"),
+ PINCTRL_PIN(203, "JTAG_TRST_B"),
+ /* GPP_B */
+ PINCTRL_PIN(204, "ADM_VID_0"),
+ PINCTRL_PIN(205, "ADM_VID_1"),
+ PINCTRL_PIN(206, "GPP_B_2"),
+ PINCTRL_PIN(207, "GPP_B_3"),
+ PINCTRL_PIN(208, "GPP_B_4"),
+ PINCTRL_PIN(209, "GPP_B_5"),
+ PINCTRL_PIN(210, "GPP_B_6"),
+ PINCTRL_PIN(211, "GPP_B_7"),
+ PINCTRL_PIN(212, "GPP_B_8"),
+ PINCTRL_PIN(213, "GPP_B_9"),
+ PINCTRL_PIN(214, "GPP_B_10"),
+ PINCTRL_PIN(215, "GPP_B_11"),
+ PINCTRL_PIN(216, "SLP_S0_B"),
+ PINCTRL_PIN(217, "PLTRST_B"),
+ PINCTRL_PIN(218, "GPP_B_14"),
+ PINCTRL_PIN(219, "GPP_B_15"),
+ PINCTRL_PIN(220, "GPP_B_16"),
+ PINCTRL_PIN(221, "GPP_B_17"),
+ PINCTRL_PIN(222, "GPP_B_18"),
+ PINCTRL_PIN(223, "GPP_B_19"),
+ PINCTRL_PIN(224, "GPP_B_20"),
+ PINCTRL_PIN(225, "GPP_B_21"),
+ PINCTRL_PIN(226, "GPP_B_22"),
+ PINCTRL_PIN(227, "GPP_B_23"),
+ PINCTRL_PIN(228, "ISH_I3C0_CLK_LOOPBK"),
+ /* GPP_D */
+ PINCTRL_PIN(229, "GPP_D_0"),
+ PINCTRL_PIN(230, "GPP_D_1"),
+ PINCTRL_PIN(231, "GPP_D_2"),
+ PINCTRL_PIN(232, "GPP_D_3"),
+ PINCTRL_PIN(233, "GPP_D_4"),
+ PINCTRL_PIN(234, "GPP_D_5"),
+ PINCTRL_PIN(235, "GPP_D_6"),
+ PINCTRL_PIN(236, "GPP_D_7"),
+ PINCTRL_PIN(237, "GPP_D_8"),
+ PINCTRL_PIN(238, "GPP_D_9"),
+ PINCTRL_PIN(239, "HDA_BCLK"),
+ PINCTRL_PIN(240, "HDA_SYNC"),
+ PINCTRL_PIN(241, "HDA_SDO"),
+ PINCTRL_PIN(242, "HDA_SDI_0"),
+ PINCTRL_PIN(243, "GPP_D_14"),
+ PINCTRL_PIN(244, "GPP_D_15"),
+ PINCTRL_PIN(245, "GPP_D_16"),
+ PINCTRL_PIN(246, "HDA_RST_B"),
+ PINCTRL_PIN(247, "GPP_D_18"),
+ PINCTRL_PIN(248, "GPP_D_19"),
+ PINCTRL_PIN(249, "GPP_D_20"),
+ PINCTRL_PIN(250, "UFS_REFCLK"),
+ PINCTRL_PIN(251, "BPKI3C_SDA"),
+ PINCTRL_PIN(252, "BPKI3C_SCL"),
+ PINCTRL_PIN(253, "BOOTHALT_B"),
+ /* vGPIO */
+ PINCTRL_PIN(254, "CNV_BTEN"),
+ PINCTRL_PIN(255, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(256, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(257, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(258, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(259, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(260, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(261, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(262, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(263, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(264, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(265, "vUART0_TXD"),
+ PINCTRL_PIN(266, "vUART0_RXD"),
+ PINCTRL_PIN(267, "vUART0_CTS_B"),
+ PINCTRL_PIN(268, "vUART0_RTS_B"),
+ PINCTRL_PIN(269, "vISH_UART0_TXD"),
+ PINCTRL_PIN(270, "vISH_UART0_RXD"),
+ PINCTRL_PIN(271, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(272, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(273, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(274, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(275, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(276, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(277, "vI2S2_SCLK"),
+ PINCTRL_PIN(278, "vI2S2_SFRM"),
+ PINCTRL_PIN(279, "vI2S2_TXD"),
+ PINCTRL_PIN(280, "vI2S2_RXD"),
+ PINCTRL_PIN(281, "vCNV_BT_I2S_BCLK_2"),
+ PINCTRL_PIN(282, "vCNV_BT_I2S_WS_SYNC_2"),
+ PINCTRL_PIN(283, "vCNV_BT_I2S_SDO_2"),
+ PINCTRL_PIN(284, "vCNV_BT_I2S_SDI_2"),
+ PINCTRL_PIN(285, "vI2S2_SCLK_2"),
+ PINCTRL_PIN(286, "vI2S2_SFRM_2"),
+ PINCTRL_PIN(287, "vI2S2_TXD_2"),
+ PINCTRL_PIN(288, "vI2S2_RXD_2"),
+};
+
+static const struct intel_padgroup mtlp_community0_gpps[] = {
+ MTL_GPP(0, 0, 4, 0), /* CPU */
+ MTL_GPP(1, 5, 28, 32), /* GPP_V */
+ MTL_GPP(2, 29, 52, 64), /* GPP_C */
+};
+
+static const struct intel_padgroup mtlp_community1_gpps[] = {
+ MTL_GPP(0, 53, 77, 96), /* GPP_A */
+ MTL_GPP(1, 78, 102, 128), /* GPP_E */
+};
+
+static const struct intel_padgroup mtlp_community3_gpps[] = {
+ MTL_GPP(0, 103, 128, 160), /* GPP_H */
+ MTL_GPP(1, 129, 154, 192), /* GPP_F */
+ MTL_GPP(2, 155, 169, 224), /* SPI0 */
+ MTL_GPP(3, 170, 183, 256), /* vGPIO_3 */
+};
+
+static const struct intel_padgroup mtlp_community4_gpps[] = {
+ MTL_GPP(0, 184, 191, 288), /* GPP_S */
+ MTL_GPP(1, 192, 203, 320), /* JTAG */
+};
+
+static const struct intel_padgroup mtlp_community5_gpps[] = {
+ MTL_GPP(0, 204, 228, 352), /* GPP_B */
+ MTL_GPP(1, 229, 253, 384), /* GPP_D */
+ MTL_GPP(2, 254, 285, 416), /* vGPIO_0 */
+ MTL_GPP(3, 286, 288, 448), /* vGPIO_1 */
+};
+
+static const struct intel_community mtlp_communities[] = {
+ MTL_COMMUNITY(0, 0, 52, mtlp_community0_gpps),
+ MTL_COMMUNITY(1, 53, 102, mtlp_community1_gpps),
+ MTL_COMMUNITY(2, 103, 183, mtlp_community3_gpps),
+ MTL_COMMUNITY(3, 184, 203, mtlp_community4_gpps),
+ MTL_COMMUNITY(4, 204, 288, mtlp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data mtlp_soc_data = {
+ .pins = mtlp_pins,
+ .npins = ARRAY_SIZE(mtlp_pins),
+ .communities = mtlp_communities,
+ .ncommunities = ARRAY_SIZE(mtlp_communities),
+};
+
+static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
+ { "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mtl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(mtl_pinctrl_pm_ops);
+
+static struct platform_driver mtl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "meteorlake-pinctrl",
+ .acpi_match_table = mtl_pinctrl_acpi_match,
+ .pm = &mtl_pinctrl_pm_ops,
+ },
+};
+module_platform_driver(mtl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Meteor Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index acccde9262ba..78c02b7c81f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1107,24 +1107,10 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0060, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0060, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0060, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 31, 1),
PIN_FIELD_BASE(152, 152, 7, 0x0090, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x0090, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x0090, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 31, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 31, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0030, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0030, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0030, 0x10, 4, 1),
@@ -1137,12 +1123,6 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0030, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0030, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0030, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 31, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
@@ -1164,24 +1144,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0080, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0080, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0080, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00c0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00c0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00c0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00c0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0040, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0040, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0040, 0x10, 4, 1),
@@ -1194,12 +1160,6 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0040, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0040, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
@@ -1221,24 +1181,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0090, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0090, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 13, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 11, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 23, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 9, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 21, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 7, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 19, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 5, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 17, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 15, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00d0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00d0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00d0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00d0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 3, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0050, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0050, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0050, 0x10, 4, 1),
@@ -1251,83 +1197,169 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0050, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0050, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0050, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 7, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 5, 1),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1e0en_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 1),
-};
+static const struct mtk_pin_field_calc mt8192_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(89, 89, 2, 0x0040, 0x10, 0, 5),
+ PIN_FIELD_BASE(90, 90, 2, 0x0040, 0x10, 5, 5),
-static const struct mtk_pin_field_calc mt8192_pin_e0_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 19, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 16, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 4, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 13, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 1, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 10, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 28, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 7, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 25, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 4, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 22, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 4, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 4, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 10, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 4, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 7, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 3),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 2, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 20, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 17, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 5, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 14, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 2, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 29, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 8, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 26, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 5, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 23, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 2, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 5, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 5, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 11, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 2, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 5, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 2, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 8, 1),
+static const struct mtk_pin_field_calc mt8192_pin_rsel_range[] = {
+ PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 2),
+ PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 2),
+ PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 2),
+ PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 2),
+ PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 2),
+ PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 2),
+ PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 2),
+ PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 2),
};
+static const unsigned int mt8192_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE,/*0*/ MTK_PULL_PU_PD_TYPE,/*1*/
+ MTK_PULL_PU_PD_TYPE,/*2*/ MTK_PULL_PU_PD_TYPE,/*3*/
+ MTK_PULL_PU_PD_TYPE,/*4*/ MTK_PULL_PU_PD_TYPE,/*5*/
+ MTK_PULL_PU_PD_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE,/*7*/
+ MTK_PULL_PU_PD_TYPE,/*8*/ MTK_PULL_PU_PD_TYPE,/*9*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PUPD_R1R0_TYPE,/*13*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*14*/ MTK_PULL_PUPD_R1R0_TYPE,/*15*/
+ MTK_PULL_PU_PD_TYPE,/*16*/ MTK_PULL_PU_PD_TYPE,/*17*/
+ MTK_PULL_PU_PD_TYPE,/*18*/ MTK_PULL_PU_PD_TYPE,/*19*/
+ MTK_PULL_PU_PD_TYPE,/*20*/ MTK_PULL_PU_PD_TYPE,/*21*/
+ MTK_PULL_PU_PD_TYPE,/*22*/ MTK_PULL_PU_PD_TYPE,/*23*/
+ MTK_PULL_PU_PD_TYPE,/*24*/ MTK_PULL_PU_PD_TYPE,/*25*/
+ MTK_PULL_PU_PD_TYPE,/*26*/ MTK_PULL_PU_PD_TYPE,/*27*/
+ MTK_PULL_PU_PD_TYPE,/*28*/ MTK_PULL_PU_PD_TYPE,/*29*/
+ MTK_PULL_PU_PD_TYPE,/*30*/ MTK_PULL_PU_PD_TYPE,/*31*/
+ MTK_PULL_PU_PD_TYPE,/*32*/ MTK_PULL_PU_PD_TYPE,/*33*/
+ MTK_PULL_PU_PD_TYPE,/*34*/ MTK_PULL_PU_PD_TYPE,/*35*/
+ MTK_PULL_PU_PD_TYPE,/*36*/ MTK_PULL_PU_PD_TYPE,/*37*/
+ MTK_PULL_PU_PD_TYPE,/*38*/ MTK_PULL_PU_PD_TYPE,/*39*/
+ MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/
+ MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/
+ MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PU_PD_TYPE,/*57*/
+ MTK_PULL_PU_PD_TYPE,/*58*/ MTK_PULL_PU_PD_TYPE,/*59*/
+ MTK_PULL_PU_PD_TYPE,/*60*/ MTK_PULL_PU_PD_TYPE,/*61*/
+ MTK_PULL_PU_PD_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE,/*63*/
+ MTK_PULL_PU_PD_TYPE,/*64*/ MTK_PULL_PU_PD_TYPE,/*65*/
+ MTK_PULL_PU_PD_TYPE,/*66*/ MTK_PULL_PU_PD_TYPE,/*67*/
+ MTK_PULL_PU_PD_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+ MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+ MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+ MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+ MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+ MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+ MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+ MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+ MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+ MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+ MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+ MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+ MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+ MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+ MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+ MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_TYPE,/*99*/
+ MTK_PULL_PU_PD_TYPE,/*100*/ MTK_PULL_PU_PD_TYPE,/*101*/
+ MTK_PULL_PU_PD_TYPE,/*102*/ MTK_PULL_PU_PD_TYPE,/*103*/
+ MTK_PULL_PU_PD_TYPE,/*104*/ MTK_PULL_PU_PD_TYPE,/*105*/
+ MTK_PULL_PU_PD_TYPE,/*106*/ MTK_PULL_PU_PD_TYPE,/*107*/
+ MTK_PULL_PU_PD_TYPE,/*108*/ MTK_PULL_PU_PD_TYPE,/*109*/
+ MTK_PULL_PU_PD_TYPE,/*110*/ MTK_PULL_PU_PD_TYPE,/*111*/
+ MTK_PULL_PU_PD_TYPE,/*112*/ MTK_PULL_PU_PD_TYPE,/*113*/
+ MTK_PULL_PU_PD_TYPE,/*114*/ MTK_PULL_PU_PD_TYPE,/*115*/
+ MTK_PULL_PU_PD_TYPE,/*116*/ MTK_PULL_PU_PD_TYPE,/*117*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*118*/ MTK_PULL_PU_PD_RSEL_TYPE,/*119*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*120*/ MTK_PULL_PU_PD_RSEL_TYPE,/*121*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*122*/ MTK_PULL_PU_PD_RSEL_TYPE,/*123*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*124*/ MTK_PULL_PU_PD_RSEL_TYPE,/*125*/
+ MTK_PULL_PU_PD_TYPE,/*126*/ MTK_PULL_PU_PD_TYPE,/*127*/
+ MTK_PULL_PU_PD_TYPE,/*128*/ MTK_PULL_PU_PD_TYPE,/*129*/
+ MTK_PULL_PU_PD_TYPE,/*130*/ MTK_PULL_PU_PD_TYPE,/*131*/
+ MTK_PULL_PU_PD_TYPE,/*132*/ MTK_PULL_PU_PD_TYPE,/*133*/
+ MTK_PULL_PU_PD_TYPE,/*134*/ MTK_PULL_PU_PD_TYPE,/*135*/
+ MTK_PULL_PU_PD_TYPE,/*136*/ MTK_PULL_PU_PD_TYPE,/*137*/
+ MTK_PULL_PU_PD_TYPE,/*138*/ MTK_PULL_PU_PD_RSEL_TYPE,/*139*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*140*/ MTK_PULL_PU_PD_RSEL_TYPE,/*141*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*142*/ MTK_PULL_PU_PD_TYPE,/*143*/
+ MTK_PULL_PU_PD_TYPE,/*144*/ MTK_PULL_PU_PD_TYPE,/*145*/
+ MTK_PULL_PU_PD_TYPE,/*146*/ MTK_PULL_PU_PD_TYPE,/*147*/
+ MTK_PULL_PU_PD_TYPE,/*148*/ MTK_PULL_PU_PD_TYPE,/*149*/
+ MTK_PULL_PU_PD_TYPE,/*150*/ MTK_PULL_PU_PD_TYPE,/*151*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/
+ MTK_PULL_PU_PD_TYPE,/*156*/ MTK_PULL_PU_PD_TYPE,/*157*/
+ MTK_PULL_PU_PD_TYPE,/*158*/ MTK_PULL_PU_PD_TYPE,/*159*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*160*/ MTK_PULL_PU_PD_RSEL_TYPE,/*161*/
+ MTK_PULL_PU_PD_TYPE,/*162*/ MTK_PULL_PU_PD_TYPE,/*163*/
+ MTK_PULL_PU_PD_TYPE,/*164*/ MTK_PULL_PU_PD_TYPE,/*165*/
+ MTK_PULL_PU_PD_TYPE,/*166*/ MTK_PULL_PU_PD_TYPE,/*167*/
+ MTK_PULL_PU_PD_TYPE,/*168*/ MTK_PULL_PU_PD_TYPE,/*169*/
+ MTK_PULL_PU_PD_TYPE,/*170*/ MTK_PULL_PU_PD_TYPE,/*171*/
+ MTK_PULL_PU_PD_TYPE,/*172*/ MTK_PULL_PU_PD_TYPE,/*173*/
+ MTK_PULL_PU_PD_TYPE,/*174*/ MTK_PULL_PU_PD_TYPE,/*175*/
+ MTK_PULL_PU_PD_TYPE,/*176*/ MTK_PULL_PU_PD_TYPE,/*177*/
+ MTK_PULL_PU_PD_TYPE,/*178*/ MTK_PULL_PU_PD_TYPE,/*179*/
+ MTK_PULL_PU_PD_TYPE,/*180*/ MTK_PULL_PU_PD_TYPE,/*181*/
+ MTK_PULL_PU_PD_TYPE,/*182*/ MTK_PULL_PUPD_R1R0_TYPE,/*183*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*184*/ MTK_PULL_PUPD_R1R0_TYPE,/*185*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*186*/ MTK_PULL_PUPD_R1R0_TYPE,/*187*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*188*/ MTK_PULL_PUPD_R1R0_TYPE,/*189*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*190*/ MTK_PULL_PUPD_R1R0_TYPE,/*191*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*192*/ MTK_PULL_PUPD_R1R0_TYPE,/*193*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*194*/ MTK_PULL_PU_PD_TYPE,/*195*/
+ MTK_PULL_PU_PD_TYPE,/*196*/ MTK_PULL_PU_PD_TYPE,/*197*/
+ MTK_PULL_PU_PD_TYPE,/*198*/ MTK_PULL_PU_PD_TYPE,/*199*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*200*/ MTK_PULL_PU_PD_RSEL_TYPE,/*201*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*202*/ MTK_PULL_PU_PD_RSEL_TYPE,/*203*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*204*/ MTK_PULL_PU_PD_RSEL_TYPE,/*205*/
+ MTK_PULL_PU_PD_TYPE,/*206*/ MTK_PULL_PU_PD_TYPE,/*207*/
+ MTK_PULL_PU_PD_TYPE,/*208*/ MTK_PULL_PU_PD_TYPE,/*209*/
+ MTK_PULL_PU_PD_TYPE,/*210*/ MTK_PULL_PU_PD_TYPE,/*211*/
+ MTK_PULL_PU_PD_TYPE,/*212*/ MTK_PULL_PU_PD_TYPE,/*213*/
+ MTK_PULL_PU_PD_TYPE,/*214*/ MTK_PULL_PU_PD_TYPE,/*215*/
+ MTK_PULL_PU_PD_TYPE,/*216*/ MTK_PULL_PU_PD_TYPE,/*217*/
+ MTK_PULL_PU_PD_TYPE,/*218*/ MTK_PULL_PU_PD_TYPE,/*219*/
+};
static const char * const mt8192_pinctrl_register_base_names[] = {
"iocfg0", "iocfg_rm", "iocfg_bm", "iocfg_bl", "iocfg_br",
@@ -1355,9 +1387,8 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8192_pin_pupd_range),
[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8192_pin_r0_range),
[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8192_pin_r1_range),
- [PINCTRL_PIN_REG_DRV_EN] = MTK_RANGE(mt8192_pin_e1e0en_range),
- [PINCTRL_PIN_REG_DRV_E0] = MTK_RANGE(mt8192_pin_e0_range),
- [PINCTRL_PIN_REG_DRV_E1] = MTK_RANGE(mt8192_pin_e1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8192_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8192_pin_rsel_range),
};
static const struct mtk_pin_soc mt8192_data = {
@@ -1367,17 +1398,16 @@ static const struct mtk_pin_soc mt8192_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt8192),
.base_names = mt8192_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt8192_pinctrl_register_base_names),
+ .pull_type = mt8192_pull_type,
.eint_hw = &mt8192_eint_hw,
.nfuncs = 8,
.gpio_m = 0,
.bias_set_combo = mtk_pinconf_bias_set_combo,
.bias_get_combo = mtk_pinconf_bias_get_combo,
- .drive_set = mtk_pinconf_drive_set_raw,
- .drive_get = mtk_pinconf_drive_get_raw,
- .adv_pull_get = mtk_pinconf_adv_pull_get,
- .adv_pull_set = mtk_pinconf_adv_pull_set,
- .adv_drive_get = mtk_pinconf_adv_drive_get,
- .adv_drive_set = mtk_pinconf_adv_drive_set,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
};
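/*
 * Context for the two hunks above (a hedged reading of the tables, not
 * taken from the patch text): the old per-bit E1/E0/enable drive tables
 * are folded into one 3-bit PINCTRL_PIN_REG_DRV_ADV field, the new
 * 2-bit PINCTRL_PIN_REG_RSEL field selects the pull resistor for the
 * RSEL-capable pins, and mt8192_pull_type[] tells
 * mtk_pinconf_bias_set_combo()/mtk_pinconf_bias_get_combo() which bias
 * scheme (plain PU/PD, PUPD+R1R0, or PU/PD+RSEL) each pin uses.
 */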
static const struct of_device_id mt8192_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index a1f93859e7ca..8ef0a97d2bf5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -96,10 +96,12 @@ static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_groups; n++) {
if (strcmp(name, pctl->groups[n].name) == 0)
return &pctl->groups[n];
}
+
return NULL;
}
@@ -108,6 +110,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
unsigned long config)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (config == grp->settings[n].val) {
if (!pctl->variant || (pctl->variant &
@@ -115,6 +118,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -123,6 +127,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
const char *name)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (strcmp(name, grp->settings[n].name) == 0) {
if (!pctl->variant || (pctl->variant &
@@ -130,6 +135,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -137,6 +143,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (grp->settings[n].flags &
(MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
@@ -145,6 +152,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -152,10 +160,12 @@ static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_functions; n++) {
if (strcmp(name, pctl->functions[n].name) == 0)
return &pctl->functions[n];
}
+
return NULL;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 640e50d94f27..f5014d09d81a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1421,8 +1421,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
has_config = nmk_pinctrl_dt_get_config(np, &configs);
np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config)
+ if (np_config) {
has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+ of_node_put(np_config);
+ }
if (has_config) {
const char *gpio_name;
const char *pin;
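The nomadik hunk above fixes an OF-node reference leak: of_parse_phandle() returns the phandle target with its refcount elevated, and the caller must drop that reference with of_node_put() once it is done with the node. A minimal sketch of the pattern (function name hypothetical):

#include <linux/of.h>

static void example_apply_config(struct device_node *np)
{
	struct device_node *np_config;

	np_config = of_parse_phandle(np, "ste,config", 0);
	if (np_config) {
		/* ... read properties from np_config ... */
		of_node_put(np_config);	/* balance of_parse_phandle() */
	}
}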
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 0645c2c24f50..4691a33bc374 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -6,8 +6,6 @@
* Authors: Ken Xue <Ken.Xue@amd.com>
* Wu, Jeff <Jeff.Wu@amd.com>
*
- * Contact Information: Nehal Shah <Nehal-bakulchandra.Shah@amd.com>
- * Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
#include <linux/err.h>
@@ -31,6 +29,7 @@
#include <linux/bitops.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
#include "core.h"
#include "pinctrl-utils.h"
@@ -203,8 +202,6 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
bool tmr_out_unit;
- unsigned int time;
- unsigned int unit;
bool tmr_large;
char *level_trig;
@@ -218,13 +215,13 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *pull_up_sel;
char *pull_up_enable;
char *pull_down_enable;
- char *output_value;
- char *output_enable;
+ char *orientation;
char debounce_value[40];
char *debounce_enable;
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
- seq_printf(s, "GPIO bank%d\t", bank);
+ unsigned int time = 0;
+ unsigned int unit = 0;
switch (bank) {
case 0:
@@ -247,8 +244,9 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
/* Illegal bank number, ignore */
continue;
}
+ seq_printf(s, "GPIO bank%d\n", bank);
for (; i < pin_num; i++) {
- seq_printf(s, "pin%d\t", i);
+ seq_printf(s, "📌%d\t", i);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -256,84 +254,91 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
u8 level = (pin_reg >> ACTIVE_LEVEL_OFF) &
ACTIVE_LEVEL_MASK;
- interrupt_enable = "interrupt is enabled|";
+ interrupt_enable = "+";
if (level == ACTIVE_LEVEL_HIGH)
- active_level = "Active high|";
+ active_level = "↑";
else if (level == ACTIVE_LEVEL_LOW)
- active_level = "Active low|";
+ active_level = "↓";
else if (!(pin_reg & BIT(LEVEL_TRIG_OFF)) &&
level == ACTIVE_LEVEL_BOTH)
- active_level = "Active on both|";
+ active_level = "b";
else
- active_level = "Unknown Active level|";
+ active_level = "?";
if (pin_reg & BIT(LEVEL_TRIG_OFF))
- level_trig = "Level trigger|";
+ level_trig = "level";
else
- level_trig = "Edge trigger|";
+ level_trig = " edge";
} else {
- interrupt_enable =
- "interrupt is disabled|";
- active_level = " ";
- level_trig = " ";
+ interrupt_enable = "∅";
+ active_level = "∅";
+ level_trig = " ∅";
}
if (pin_reg & BIT(INTERRUPT_MASK_OFF))
- interrupt_mask =
- "interrupt is unmasked|";
+ interrupt_mask = "-";
else
- interrupt_mask =
- "interrupt is masked|";
+ interrupt_mask = "+";
+ seq_printf(s, "int %s (🎭 %s)| active-%s| %s-🔫| ",
+ interrupt_enable,
+ interrupt_mask,
+ active_level,
+ level_trig);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
- wake_cntrl0 = "enable wakeup in S0i3 state|";
+ wake_cntrl0 = "+";
else
- wake_cntrl0 = "disable wakeup in S0i3 state|";
+ wake_cntrl0 = "∅";
+ seq_printf(s, "S0i3 🌅 %s| ", wake_cntrl0);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
- wake_cntrl1 = "enable wakeup in S3 state|";
+ wake_cntrl1 = "+";
else
- wake_cntrl1 = "disable wakeup in S3 state|";
+ wake_cntrl1 = "∅";
+ seq_printf(s, "S3 🌅 %s| ", wake_cntrl1);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
- wake_cntrl2 = "enable wakeup in S4/S5 state|";
+ wake_cntrl2 = "+";
else
- wake_cntrl2 = "disable wakeup in S4/S5 state|";
+ wake_cntrl2 = "∅";
+ seq_printf(s, "S4/S5 🌅 %s| ", wake_cntrl2);
if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
- pull_up_enable = "pull-up is enabled|";
+ pull_up_enable = "+";
if (pin_reg & BIT(PULL_UP_SEL_OFF))
- pull_up_sel = "8k pull-up|";
+ pull_up_sel = "8k";
else
- pull_up_sel = "4k pull-up|";
+ pull_up_sel = "4k";
} else {
- pull_up_enable = "pull-up is disabled|";
- pull_up_sel = " ";
+ pull_up_enable = "∅";
+ pull_up_sel = " ";
}
+ seq_printf(s, "pull-↑ %s (%s)| ",
+ pull_up_enable,
+ pull_up_sel);
if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF))
- pull_down_enable = "pull-down is enabled|";
+ pull_down_enable = "+";
else
- pull_down_enable = "Pull-down is disabled|";
+ pull_down_enable = "∅";
+ seq_printf(s, "pull-↓ %s| ", pull_down_enable);
if (pin_reg & BIT(OUTPUT_ENABLE_OFF)) {
- pin_sts = " ";
- output_enable = "output is enabled|";
+ pin_sts = "output";
if (pin_reg & BIT(OUTPUT_VALUE_OFF))
- output_value = "output is high|";
+ orientation = "↑";
else
- output_value = "output is low|";
+ orientation = "↓";
} else {
- output_enable = "output is disabled|";
- output_value = " ";
-
+ pin_sts = "input ";
if (pin_reg & BIT(PIN_STS_OFF))
- pin_sts = "input is high|";
+ orientation = "↑";
else
- pin_sts = "input is low|";
+ orientation = "↓";
}
+ seq_printf(s, "%s %s| ", pin_sts, orientation);
db_cntrl = (DB_CNTRl_MASK << DB_CNTRL_OFF) & pin_reg;
if (db_cntrl) {
@@ -352,27 +357,18 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
unit = 61;
}
if ((DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (high and low) enabled|";
+ debounce_enable = "b +";
else if ((DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (low) enabled|";
+ debounce_enable = "↓ +";
else
- debounce_enable = "debouncing filter (high) enabled|";
+ debounce_enable = "↑ +";
- snprintf(debounce_value, sizeof(debounce_value),
- "debouncing timeout is %u (us)|", time * unit);
} else {
- debounce_enable = "debouncing filter disabled|";
- snprintf(debounce_value, sizeof(debounce_value), " ");
+ debounce_enable = " ∅";
}
-
- seq_printf(s, "%s %s %s %s %s %s\n"
- " %s %s %s %s %s %s %s %s %s 0x%x\n",
- level_trig, active_level, interrupt_enable,
- interrupt_mask, wake_cntrl0, wake_cntrl1,
- wake_cntrl2, pin_sts, pull_up_sel,
- pull_up_enable, pull_down_enable,
- output_value, output_enable,
- debounce_enable, debounce_value, pin_reg);
+ snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
+ seq_printf(s, "debounce %s (⏰ %sus)| ", debounce_enable, debounce_value);
+ seq_printf(s, " 0x%x\n", pin_reg);
}
}
}
@@ -917,6 +913,7 @@ static int amd_gpio_suspend(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -925,7 +922,9 @@ static int amd_gpio_suspend(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -935,6 +934,7 @@ static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -943,7 +943,10 @@ static int amd_gpio_resume(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -955,14 +958,115 @@ static const struct dev_pm_ops amd_gpio_pm_ops = {
};
#endif
+static int amd_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pmx_functions);
+}
+
+static const char *amd_get_fname(struct pinctrl_dev *pctrldev, unsigned int selector)
+{
+ return pmx_functions[selector].name;
+}
+
+static int amd_get_groups(struct pinctrl_dev *pctrldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (!gpio_dev->iomux_base) {
+ dev_err(&gpio_dev->pdev->dev, "iomux function %d group not supported\n", selector);
+ return -EINVAL;
+ }
+
+ *groups = pmx_functions[selector].groups;
+ *num_groups = pmx_functions[selector].ngroups;
+ return 0;
+}
+
+static int amd_set_mux(struct pinctrl_dev *pctrldev, unsigned int function, unsigned int group)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+ struct device *dev = &gpio_dev->pdev->dev;
+ struct pin_desc *pd;
+ int ind, index;
+
+ if (!gpio_dev->iomux_base)
+ return -EINVAL;
+
+ for (index = 0; index < NSELECTS; index++) {
+ if (strcmp(gpio_dev->groups[group].name, pmx_functions[function].groups[index]))
+ continue;
+
+ if (readb(gpio_dev->iomux_base + pmx_functions[function].index) ==
+ FUNCTION_INVALID) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ writeb(index, gpio_dev->iomux_base + pmx_functions[function].index);
+
+ if (index != (readb(gpio_dev->iomux_base + pmx_functions[function].index) &
+ FUNCTION_MASK)) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ for (ind = 0; ind < gpio_dev->groups[group].npins; ind++) {
+ if (strncmp(gpio_dev->groups[group].name, "IMX_F", strlen("IMX_F")))
+ continue;
+
+ pd = pin_desc_get(gpio_dev->pctrl, gpio_dev->groups[group].pins[ind]);
+ pd->mux_owner = gpio_dev->groups[group].name;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops amd_pmxops = {
+ .get_functions_count = amd_get_functions_count,
+ .get_function_name = amd_get_fname,
+ .get_function_groups = amd_get_groups,
+ .set_mux = amd_set_mux,
+};
+
static struct pinctrl_desc amd_pinctrl_desc = {
.pins = kerncz_pins,
.npins = ARRAY_SIZE(kerncz_pins),
.pctlops = &amd_pinctrl_ops,
+ .pmxops = &amd_pmxops,
.confops = &amd_pinconf_ops,
.owner = THIS_MODULE,
};
+static void amd_get_iomux_res(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = &amd_pinctrl_desc;
+ struct device *dev = &gpio_dev->pdev->dev;
+ int index;
+
+ index = device_property_match_string(dev, "pinctrl-resource-names", "iomux");
+ if (index < 0) {
+ dev_warn(dev, "failed to get iomux index\n");
+ goto out_no_pinmux;
+ }
+
+ gpio_dev->iomux_base = devm_platform_ioremap_resource(gpio_dev->pdev, index);
+ if (IS_ERR(gpio_dev->iomux_base)) {
+ dev_warn(dev, "Failed to get iomux %d io resource\n", index);
+ goto out_no_pinmux;
+ }
+
+ return;
+
+out_no_pinmux:
+ desc->pmxops = NULL;
+}
+
static int amd_gpio_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -977,17 +1081,12 @@ static int amd_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&gpio_dev->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ gpio_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(gpio_dev->base)) {
dev_err(&pdev->dev, "Failed to get gpio io resource.\n");
- return -EINVAL;
+ return PTR_ERR(gpio_dev->base);
}
- gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!gpio_dev->base)
- return -ENOMEM;
-
gpio_dev->irq = platform_get_irq(pdev, 0);
if (gpio_dev->irq < 0)
return gpio_dev->irq;
@@ -1020,6 +1119,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
amd_pinctrl_desc.name = dev_name(&pdev->dev);
+ amd_get_iomux_res(gpio_dev);
gpio_dev->pctrl = devm_pinctrl_register(&pdev->dev, &amd_pinctrl_desc,
gpio_dev);
if (IS_ERR(gpio_dev->pctrl)) {
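In the probe hunk above, the open-coded platform_get_resource() + devm_ioremap() pair becomes a single devm_platform_get_and_ioremap_resource() call, which reports failure as an ERR_PTR() value instead of NULL. A minimal sketch of the resulting probe shape (names hypothetical):

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * Fetch MEM resource 0 and ioremap it in one managed call;
	 * on failure this returns an ERR_PTR(), not NULL.
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base stays mapped for the lifetime of the device */
	return 0;
}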
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 1d4317073654..c8635998465d 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -74,23 +74,24 @@
#define CLR_INTR_STAT 0x1UL
-struct amd_pingroup {
- const char *name;
- const unsigned *pins;
- unsigned npins;
-};
+#define NSELECTS 0x4
+
+#define FUNCTION_MASK GENMASK(1, 0)
+#define FUNCTION_INVALID GENMASK(7, 0)
struct amd_function {
const char *name;
- const char * const *groups;
+ const char * const groups[NSELECTS];
unsigned ngroups;
+ int index;
};
struct amd_gpio {
raw_spinlock_t lock;
void __iomem *base;
+ void __iomem *iomux_base;
- const struct amd_pingroup *groups;
+ const struct pingroup *groups;
u32 ngroups;
struct pinctrl_dev *pctrl;
struct gpio_chip gc;
@@ -288,45 +289,1332 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(183, "GPIO_183"),
};
-static const unsigned i2c0_pins[] = {145, 146};
-static const unsigned i2c1_pins[] = {147, 148};
-static const unsigned i2c2_pins[] = {113, 114};
-static const unsigned i2c3_pins[] = {19, 20};
+#define AMD_PINS(...) (const unsigned int []){__VA_ARGS__}
+
+enum amd_functions {
+ IMX_F0_GPIO0,
+ IMX_F1_GPIO0,
+ IMX_F2_GPIO0,
+ IMX_F3_GPIO0,
+ IMX_F0_GPIO1,
+ IMX_F1_GPIO1,
+ IMX_F2_GPIO1,
+ IMX_F3_GPIO1,
+ IMX_F0_GPIO2,
+ IMX_F1_GPIO2,
+ IMX_F2_GPIO2,
+ IMX_F3_GPIO2,
+ IMX_F0_GPIO3,
+ IMX_F1_GPIO3,
+ IMX_F2_GPIO3,
+ IMX_F3_GPIO3,
+ IMX_F0_GPIO4,
+ IMX_F1_GPIO4,
+ IMX_F2_GPIO4,
+ IMX_F3_GPIO4,
+ IMX_F0_GPIO5,
+ IMX_F1_GPIO5,
+ IMX_F2_GPIO5,
+ IMX_F3_GPIO5,
+ IMX_F0_GPIO6,
+ IMX_F1_GPIO6,
+ IMX_F2_GPIO6,
+ IMX_F3_GPIO6,
+ IMX_F0_GPIO7,
+ IMX_F1_GPIO7,
+ IMX_F2_GPIO7,
+ IMX_F3_GPIO7,
+ IMX_F0_GPIO8,
+ IMX_F1_GPIO8,
+ IMX_F2_GPIO8,
+ IMX_F3_GPIO8,
+ IMX_F0_GPIO9,
+ IMX_F1_GPIO9,
+ IMX_F2_GPIO9,
+ IMX_F3_GPIO9,
+ IMX_F0_GPIO10,
+ IMX_F1_GPIO10,
+ IMX_F2_GPIO10,
+ IMX_F3_GPIO10,
+ IMX_F0_GPIO11,
+ IMX_F1_GPIO11,
+ IMX_F2_GPIO11,
+ IMX_F3_GPIO11,
+ IMX_F0_GPIO12,
+ IMX_F1_GPIO12,
+ IMX_F2_GPIO12,
+ IMX_F3_GPIO12,
+ IMX_F0_GPIO13,
+ IMX_F1_GPIO13,
+ IMX_F2_GPIO13,
+ IMX_F3_GPIO13,
+ IMX_F0_GPIO14,
+ IMX_F1_GPIO14,
+ IMX_F2_GPIO14,
+ IMX_F3_GPIO14,
+ IMX_F0_GPIO15,
+ IMX_F1_GPIO15,
+ IMX_F2_GPIO15,
+ IMX_F3_GPIO15,
+ IMX_F0_GPIO16,
+ IMX_F1_GPIO16,
+ IMX_F2_GPIO16,
+ IMX_F3_GPIO16,
+ IMX_F0_GPIO17,
+ IMX_F1_GPIO17,
+ IMX_F2_GPIO17,
+ IMX_F3_GPIO17,
+ IMX_F0_GPIO18,
+ IMX_F1_GPIO18,
+ IMX_F2_GPIO18,
+ IMX_F3_GPIO18,
+ IMX_F0_GPIO19,
+ IMX_F1_GPIO19,
+ IMX_F2_GPIO19,
+ IMX_F3_GPIO19,
+ IMX_F0_GPIO20,
+ IMX_F1_GPIO20,
+ IMX_F2_GPIO20,
+ IMX_F3_GPIO20,
+ IMX_F0_GPIO21,
+ IMX_F1_GPIO21,
+ IMX_F2_GPIO21,
+ IMX_F3_GPIO21,
+ IMX_F0_GPIO22,
+ IMX_F1_GPIO22,
+ IMX_F2_GPIO22,
+ IMX_F3_GPIO22,
+ IMX_F0_GPIO23,
+ IMX_F1_GPIO23,
+ IMX_F2_GPIO23,
+ IMX_F3_GPIO23,
+ IMX_F0_GPIO24,
+ IMX_F1_GPIO24,
+ IMX_F2_GPIO24,
+ IMX_F3_GPIO24,
+ IMX_F0_GPIO25,
+ IMX_F1_GPIO25,
+ IMX_F2_GPIO25,
+ IMX_F3_GPIO25,
+ IMX_F0_GPIO26,
+ IMX_F1_GPIO26,
+ IMX_F2_GPIO26,
+ IMX_F3_GPIO26,
+ IMX_F0_GPIO27,
+ IMX_F1_GPIO27,
+ IMX_F2_GPIO27,
+ IMX_F3_GPIO27,
+ IMX_F0_GPIO28,
+ IMX_F1_GPIO28,
+ IMX_F2_GPIO28,
+ IMX_F3_GPIO28,
+ IMX_F0_GPIO29,
+ IMX_F1_GPIO29,
+ IMX_F2_GPIO29,
+ IMX_F3_GPIO29,
+ IMX_F0_GPIO30,
+ IMX_F1_GPIO30,
+ IMX_F2_GPIO30,
+ IMX_F3_GPIO30,
+ IMX_F0_GPIO31,
+ IMX_F1_GPIO31,
+ IMX_F2_GPIO31,
+ IMX_F3_GPIO31,
+ IMX_F0_GPIO32,
+ IMX_F1_GPIO32,
+ IMX_F2_GPIO32,
+ IMX_F3_GPIO32,
+ IMX_F0_GPIO33,
+ IMX_F1_GPIO33,
+ IMX_F2_GPIO33,
+ IMX_F3_GPIO33,
+ IMX_F0_GPIO34,
+ IMX_F1_GPIO34,
+ IMX_F2_GPIO34,
+ IMX_F3_GPIO34,
+ IMX_F0_GPIO35,
+ IMX_F1_GPIO35,
+ IMX_F2_GPIO35,
+ IMX_F3_GPIO35,
+ IMX_F0_GPIO36,
+ IMX_F1_GPIO36,
+ IMX_F2_GPIO36,
+ IMX_F3_GPIO36,
+ IMX_F0_GPIO37,
+ IMX_F1_GPIO37,
+ IMX_F2_GPIO37,
+ IMX_F3_GPIO37,
+ IMX_F0_GPIO38,
+ IMX_F1_GPIO38,
+ IMX_F2_GPIO38,
+ IMX_F3_GPIO38,
+ IMX_F0_GPIO39,
+ IMX_F1_GPIO39,
+ IMX_F2_GPIO39,
+ IMX_F3_GPIO39,
+ IMX_F0_GPIO40,
+ IMX_F1_GPIO40,
+ IMX_F2_GPIO40,
+ IMX_F3_GPIO40,
+ IMX_F0_GPIO41,
+ IMX_F1_GPIO41,
+ IMX_F2_GPIO41,
+ IMX_F3_GPIO41,
+ IMX_F0_GPIO42,
+ IMX_F1_GPIO42,
+ IMX_F2_GPIO42,
+ IMX_F3_GPIO42,
+ IMX_F0_GPIO43,
+ IMX_F1_GPIO43,
+ IMX_F2_GPIO43,
+ IMX_F3_GPIO43,
+ IMX_F0_GPIO44,
+ IMX_F1_GPIO44,
+ IMX_F2_GPIO44,
+ IMX_F3_GPIO44,
+ IMX_F0_GPIO45,
+ IMX_F1_GPIO45,
+ IMX_F2_GPIO45,
+ IMX_F3_GPIO45,
+ IMX_F0_GPIO46,
+ IMX_F1_GPIO46,
+ IMX_F2_GPIO46,
+ IMX_F3_GPIO46,
+ IMX_F0_GPIO47,
+ IMX_F1_GPIO47,
+ IMX_F2_GPIO47,
+ IMX_F3_GPIO47,
+ IMX_F0_GPIO48,
+ IMX_F1_GPIO48,
+ IMX_F2_GPIO48,
+ IMX_F3_GPIO48,
+ IMX_F0_GPIO49,
+ IMX_F1_GPIO49,
+ IMX_F2_GPIO49,
+ IMX_F3_GPIO49,
+ IMX_F0_GPIO50,
+ IMX_F1_GPIO50,
+ IMX_F2_GPIO50,
+ IMX_F3_GPIO50,
+ IMX_F0_GPIO51,
+ IMX_F1_GPIO51,
+ IMX_F2_GPIO51,
+ IMX_F3_GPIO51,
+ IMX_F0_GPIO52,
+ IMX_F1_GPIO52,
+ IMX_F2_GPIO52,
+ IMX_F3_GPIO52,
+ IMX_F0_GPIO53,
+ IMX_F1_GPIO53,
+ IMX_F2_GPIO53,
+ IMX_F3_GPIO53,
+ IMX_F0_GPIO54,
+ IMX_F1_GPIO54,
+ IMX_F2_GPIO54,
+ IMX_F3_GPIO54,
+ IMX_F0_GPIO55,
+ IMX_F1_GPIO55,
+ IMX_F2_GPIO55,
+ IMX_F3_GPIO55,
+ IMX_F0_GPIO56,
+ IMX_F1_GPIO56,
+ IMX_F2_GPIO56,
+ IMX_F3_GPIO56,
+ IMX_F0_GPIO57,
+ IMX_F1_GPIO57,
+ IMX_F2_GPIO57,
+ IMX_F3_GPIO57,
+ IMX_F0_GPIO58,
+ IMX_F1_GPIO58,
+ IMX_F2_GPIO58,
+ IMX_F3_GPIO58,
+ IMX_F0_GPIO59,
+ IMX_F1_GPIO59,
+ IMX_F2_GPIO59,
+ IMX_F3_GPIO59,
+ IMX_F0_GPIO60,
+ IMX_F1_GPIO60,
+ IMX_F2_GPIO60,
+ IMX_F3_GPIO60,
+ IMX_F0_GPIO61,
+ IMX_F1_GPIO61,
+ IMX_F2_GPIO61,
+ IMX_F3_GPIO61,
+ IMX_F0_GPIO62,
+ IMX_F1_GPIO62,
+ IMX_F2_GPIO62,
+ IMX_F3_GPIO62,
+ IMX_F0_GPIO64,
+ IMX_F1_GPIO64,
+ IMX_F2_GPIO64,
+ IMX_F3_GPIO64,
+ IMX_F0_GPIO65,
+ IMX_F1_GPIO65,
+ IMX_F2_GPIO65,
+ IMX_F3_GPIO65,
+ IMX_F0_GPIO66,
+ IMX_F1_GPIO66,
+ IMX_F2_GPIO66,
+ IMX_F3_GPIO66,
+ IMX_F0_GPIO67,
+ IMX_F1_GPIO67,
+ IMX_F2_GPIO67,
+ IMX_F3_GPIO67,
+ IMX_F0_GPIO68,
+ IMX_F1_GPIO68,
+ IMX_F2_GPIO68,
+ IMX_F3_GPIO68,
+ IMX_F0_GPIO69,
+ IMX_F1_GPIO69,
+ IMX_F2_GPIO69,
+ IMX_F3_GPIO69,
+ IMX_F0_GPIO70,
+ IMX_F1_GPIO70,
+ IMX_F2_GPIO70,
+ IMX_F3_GPIO70,
+ IMX_F0_GPIO71,
+ IMX_F1_GPIO71,
+ IMX_F2_GPIO71,
+ IMX_F3_GPIO71,
+ IMX_F0_GPIO72,
+ IMX_F1_GPIO72,
+ IMX_F2_GPIO72,
+ IMX_F3_GPIO72,
+ IMX_F0_GPIO73,
+ IMX_F1_GPIO73,
+ IMX_F2_GPIO73,
+ IMX_F3_GPIO73,
+ IMX_F0_GPIO74,
+ IMX_F1_GPIO74,
+ IMX_F2_GPIO74,
+ IMX_F3_GPIO74,
+ IMX_F0_GPIO75,
+ IMX_F1_GPIO75,
+ IMX_F2_GPIO75,
+ IMX_F3_GPIO75,
+ IMX_F0_GPIO76,
+ IMX_F1_GPIO76,
+ IMX_F2_GPIO76,
+ IMX_F3_GPIO76,
+ IMX_F0_GPIO77,
+ IMX_F1_GPIO77,
+ IMX_F2_GPIO77,
+ IMX_F3_GPIO77,
+ IMX_F0_GPIO78,
+ IMX_F1_GPIO78,
+ IMX_F2_GPIO78,
+ IMX_F3_GPIO78,
+ IMX_F0_GPIO79,
+ IMX_F1_GPIO79,
+ IMX_F2_GPIO79,
+ IMX_F3_GPIO79,
+ IMX_F0_GPIO80,
+ IMX_F1_GPIO80,
+ IMX_F2_GPIO80,
+ IMX_F3_GPIO80,
+ IMX_F0_GPIO81,
+ IMX_F1_GPIO81,
+ IMX_F2_GPIO81,
+ IMX_F3_GPIO81,
+ IMX_F0_GPIO82,
+ IMX_F1_GPIO82,
+ IMX_F2_GPIO82,
+ IMX_F3_GPIO82,
+ IMX_F0_GPIO83,
+ IMX_F1_GPIO83,
+ IMX_F2_GPIO83,
+ IMX_F3_GPIO83,
+ IMX_F0_GPIO84,
+ IMX_F1_GPIO84,
+ IMX_F2_GPIO84,
+ IMX_F3_GPIO84,
+ IMX_F0_GPIO85,
+ IMX_F1_GPIO85,
+ IMX_F2_GPIO85,
+ IMX_F3_GPIO85,
+ IMX_F0_GPIO86,
+ IMX_F1_GPIO86,
+ IMX_F2_GPIO86,
+ IMX_F3_GPIO86,
+ IMX_F0_GPIO87,
+ IMX_F1_GPIO87,
+ IMX_F2_GPIO87,
+ IMX_F3_GPIO87,
+ IMX_F0_GPIO88,
+ IMX_F1_GPIO88,
+ IMX_F2_GPIO88,
+ IMX_F3_GPIO88,
+ IMX_F0_GPIO89,
+ IMX_F1_GPIO89,
+ IMX_F2_GPIO89,
+ IMX_F3_GPIO89,
+ IMX_F0_GPIO90,
+ IMX_F1_GPIO90,
+ IMX_F2_GPIO90,
+ IMX_F3_GPIO90,
+ IMX_F0_GPIO91,
+ IMX_F1_GPIO91,
+ IMX_F2_GPIO91,
+ IMX_F3_GPIO91,
+ IMX_F0_GPIO92,
+ IMX_F1_GPIO92,
+ IMX_F2_GPIO92,
+ IMX_F3_GPIO92,
+ IMX_F0_GPIO93,
+ IMX_F1_GPIO93,
+ IMX_F2_GPIO93,
+ IMX_F3_GPIO93,
+ IMX_F0_GPIO94,
+ IMX_F1_GPIO94,
+ IMX_F2_GPIO94,
+ IMX_F3_GPIO94,
+ IMX_F0_GPIO95,
+ IMX_F1_GPIO95,
+ IMX_F2_GPIO95,
+ IMX_F3_GPIO95,
+ IMX_F0_GPIO96,
+ IMX_F1_GPIO96,
+ IMX_F2_GPIO96,
+ IMX_F3_GPIO96,
+ IMX_F0_GPIO97,
+ IMX_F1_GPIO97,
+ IMX_F2_GPIO97,
+ IMX_F3_GPIO97,
+ IMX_F0_GPIO98,
+ IMX_F1_GPIO98,
+ IMX_F2_GPIO98,
+ IMX_F3_GPIO98,
+ IMX_F0_GPIO99,
+ IMX_F1_GPIO99,
+ IMX_F2_GPIO99,
+ IMX_F3_GPIO99,
+ IMX_F0_GPIO100,
+ IMX_F1_GPIO100,
+ IMX_F2_GPIO100,
+ IMX_F3_GPIO100,
+ IMX_F0_GPIO101,
+ IMX_F1_GPIO101,
+ IMX_F2_GPIO101,
+ IMX_F3_GPIO101,
+ IMX_F0_GPIO102,
+ IMX_F1_GPIO102,
+ IMX_F2_GPIO102,
+ IMX_F3_GPIO102,
+ IMX_F0_GPIO103,
+ IMX_F1_GPIO103,
+ IMX_F2_GPIO103,
+ IMX_F3_GPIO103,
+ IMX_F0_GPIO104,
+ IMX_F1_GPIO104,
+ IMX_F2_GPIO104,
+ IMX_F3_GPIO104,
+ IMX_F0_GPIO105,
+ IMX_F1_GPIO105,
+ IMX_F2_GPIO105,
+ IMX_F3_GPIO105,
+ IMX_F0_GPIO106,
+ IMX_F1_GPIO106,
+ IMX_F2_GPIO106,
+ IMX_F3_GPIO106,
+ IMX_F0_GPIO107,
+ IMX_F1_GPIO107,
+ IMX_F2_GPIO107,
+ IMX_F3_GPIO107,
+ IMX_F0_GPIO108,
+ IMX_F1_GPIO108,
+ IMX_F2_GPIO108,
+ IMX_F3_GPIO108,
+ IMX_F0_GPIO109,
+ IMX_F1_GPIO109,
+ IMX_F2_GPIO109,
+ IMX_F3_GPIO109,
+ IMX_F0_GPIO110,
+ IMX_F1_GPIO110,
+ IMX_F2_GPIO110,
+ IMX_F3_GPIO110,
+ IMX_F0_GPIO111,
+ IMX_F1_GPIO111,
+ IMX_F2_GPIO111,
+ IMX_F3_GPIO111,
+ IMX_F0_GPIO112,
+ IMX_F1_GPIO112,
+ IMX_F2_GPIO112,
+ IMX_F3_GPIO112,
+ IMX_F0_GPIO113,
+ IMX_F1_GPIO113,
+ IMX_F2_GPIO113,
+ IMX_F3_GPIO113,
+ IMX_F0_GPIO114,
+ IMX_F1_GPIO114,
+ IMX_F2_GPIO114,
+ IMX_F3_GPIO114,
+ IMX_F0_GPIO115,
+ IMX_F1_GPIO115,
+ IMX_F2_GPIO115,
+ IMX_F3_GPIO115,
+ IMX_F0_GPIO116,
+ IMX_F1_GPIO116,
+ IMX_F2_GPIO116,
+ IMX_F3_GPIO116,
+ IMX_F0_GPIO117,
+ IMX_F1_GPIO117,
+ IMX_F2_GPIO117,
+ IMX_F3_GPIO117,
+ IMX_F0_GPIO118,
+ IMX_F1_GPIO118,
+ IMX_F2_GPIO118,
+ IMX_F3_GPIO118,
+ IMX_F0_GPIO119,
+ IMX_F1_GPIO119,
+ IMX_F2_GPIO119,
+ IMX_F3_GPIO119,
+ IMX_F0_GPIO120,
+ IMX_F1_GPIO120,
+ IMX_F2_GPIO120,
+ IMX_F3_GPIO120,
+ IMX_F0_GPIO121,
+ IMX_F1_GPIO121,
+ IMX_F2_GPIO121,
+ IMX_F3_GPIO121,
+ IMX_F0_GPIO122,
+ IMX_F1_GPIO122,
+ IMX_F2_GPIO122,
+ IMX_F3_GPIO122,
+ IMX_F0_GPIO123,
+ IMX_F1_GPIO123,
+ IMX_F2_GPIO123,
+ IMX_F3_GPIO123,
+ IMX_F0_GPIO124,
+ IMX_F1_GPIO124,
+ IMX_F2_GPIO124,
+ IMX_F3_GPIO124,
+ IMX_F0_GPIO125,
+ IMX_F1_GPIO125,
+ IMX_F2_GPIO125,
+ IMX_F3_GPIO125,
+ IMX_F0_GPIO126,
+ IMX_F1_GPIO126,
+ IMX_F2_GPIO126,
+ IMX_F3_GPIO126,
+ IMX_F0_GPIO127,
+ IMX_F1_GPIO127,
+ IMX_F2_GPIO127,
+ IMX_F3_GPIO127,
+ IMX_F0_GPIO128,
+ IMX_F1_GPIO128,
+ IMX_F2_GPIO128,
+ IMX_F3_GPIO128,
+ IMX_F0_GPIO129,
+ IMX_F1_GPIO129,
+ IMX_F2_GPIO129,
+ IMX_F3_GPIO129,
+ IMX_F0_GPIO130,
+ IMX_F1_GPIO130,
+ IMX_F2_GPIO130,
+ IMX_F3_GPIO130,
+ IMX_F0_GPIO131,
+ IMX_F1_GPIO131,
+ IMX_F2_GPIO131,
+ IMX_F3_GPIO131,
+ IMX_F0_GPIO132,
+ IMX_F1_GPIO132,
+ IMX_F2_GPIO132,
+ IMX_F3_GPIO132,
+ IMX_F0_GPIO133,
+ IMX_F1_GPIO133,
+ IMX_F2_GPIO133,
+ IMX_F3_GPIO133,
+ IMX_F0_GPIO134,
+ IMX_F1_GPIO134,
+ IMX_F2_GPIO134,
+ IMX_F3_GPIO134,
+ IMX_F0_GPIO135,
+ IMX_F1_GPIO135,
+ IMX_F2_GPIO135,
+ IMX_F3_GPIO135,
+ IMX_F0_GPIO136,
+ IMX_F1_GPIO136,
+ IMX_F2_GPIO136,
+ IMX_F3_GPIO136,
+ IMX_F0_GPIO137,
+ IMX_F1_GPIO137,
+ IMX_F2_GPIO137,
+ IMX_F3_GPIO137,
+ IMX_F0_GPIO138,
+ IMX_F1_GPIO138,
+ IMX_F2_GPIO138,
+ IMX_F3_GPIO138,
+ IMX_F0_GPIO139,
+ IMX_F1_GPIO139,
+ IMX_F2_GPIO139,
+ IMX_F3_GPIO139,
+ IMX_F0_GPIO140,
+ IMX_F1_GPIO140,
+ IMX_F2_GPIO140,
+ IMX_F3_GPIO140,
+ IMX_F0_GPIO141,
+ IMX_F1_GPIO141,
+ IMX_F2_GPIO141,
+ IMX_F3_GPIO141,
+ IMX_F0_GPIO142,
+ IMX_F1_GPIO142,
+ IMX_F2_GPIO142,
+ IMX_F3_GPIO142,
+ IMX_F0_GPIO143,
+ IMX_F1_GPIO143,
+ IMX_F2_GPIO143,
+ IMX_F3_GPIO143,
+ IMX_F0_GPIO144,
+ IMX_F1_GPIO144,
+ IMX_F2_GPIO144,
+ IMX_F3_GPIO144,
+};
+
+#define AMD_PINCTRL_FUNC_GRP(_number, _func) \
+ [IMX_F##_func##_GPIO##_number] = \
+ PINCTRL_PINGROUP("IMX_F"#_func "_GPIO"#_number, AMD_PINS(_number), 1)
+
+static const struct pingroup kerncz_groups[] = {
+ AMD_PINCTRL_FUNC_GRP(0, 0),
+ AMD_PINCTRL_FUNC_GRP(0, 1),
+ AMD_PINCTRL_FUNC_GRP(0, 2),
+ AMD_PINCTRL_FUNC_GRP(0, 3),
+ AMD_PINCTRL_FUNC_GRP(1, 0),
+ AMD_PINCTRL_FUNC_GRP(1, 1),
+ AMD_PINCTRL_FUNC_GRP(1, 2),
+ AMD_PINCTRL_FUNC_GRP(1, 3),
+ AMD_PINCTRL_FUNC_GRP(2, 0),
+ AMD_PINCTRL_FUNC_GRP(2, 1),
+ AMD_PINCTRL_FUNC_GRP(2, 2),
+ AMD_PINCTRL_FUNC_GRP(2, 3),
+ AMD_PINCTRL_FUNC_GRP(3, 0),
+ AMD_PINCTRL_FUNC_GRP(3, 1),
+ AMD_PINCTRL_FUNC_GRP(3, 2),
+ AMD_PINCTRL_FUNC_GRP(3, 3),
+ AMD_PINCTRL_FUNC_GRP(4, 0),
+ AMD_PINCTRL_FUNC_GRP(4, 1),
+ AMD_PINCTRL_FUNC_GRP(4, 2),
+ AMD_PINCTRL_FUNC_GRP(4, 3),
+ AMD_PINCTRL_FUNC_GRP(5, 0),
+ AMD_PINCTRL_FUNC_GRP(5, 1),
+ AMD_PINCTRL_FUNC_GRP(5, 2),
+ AMD_PINCTRL_FUNC_GRP(5, 3),
+ AMD_PINCTRL_FUNC_GRP(6, 0),
+ AMD_PINCTRL_FUNC_GRP(6, 1),
+ AMD_PINCTRL_FUNC_GRP(6, 2),
+ AMD_PINCTRL_FUNC_GRP(6, 3),
+ AMD_PINCTRL_FUNC_GRP(7, 0),
+ AMD_PINCTRL_FUNC_GRP(7, 1),
+ AMD_PINCTRL_FUNC_GRP(7, 2),
+ AMD_PINCTRL_FUNC_GRP(7, 3),
+ AMD_PINCTRL_FUNC_GRP(8, 0),
+ AMD_PINCTRL_FUNC_GRP(8, 1),
+ AMD_PINCTRL_FUNC_GRP(8, 2),
+ AMD_PINCTRL_FUNC_GRP(8, 3),
+ AMD_PINCTRL_FUNC_GRP(9, 0),
+ AMD_PINCTRL_FUNC_GRP(9, 1),
+ AMD_PINCTRL_FUNC_GRP(9, 2),
+ AMD_PINCTRL_FUNC_GRP(9, 3),
+ AMD_PINCTRL_FUNC_GRP(10, 0),
+ AMD_PINCTRL_FUNC_GRP(10, 1),
+ AMD_PINCTRL_FUNC_GRP(10, 2),
+ AMD_PINCTRL_FUNC_GRP(10, 3),
+ AMD_PINCTRL_FUNC_GRP(11, 0),
+ AMD_PINCTRL_FUNC_GRP(11, 1),
+ AMD_PINCTRL_FUNC_GRP(11, 2),
+ AMD_PINCTRL_FUNC_GRP(11, 3),
+ AMD_PINCTRL_FUNC_GRP(12, 0),
+ AMD_PINCTRL_FUNC_GRP(12, 1),
+ AMD_PINCTRL_FUNC_GRP(12, 2),
+ AMD_PINCTRL_FUNC_GRP(12, 3),
+ AMD_PINCTRL_FUNC_GRP(13, 0),
+ AMD_PINCTRL_FUNC_GRP(13, 1),
+ AMD_PINCTRL_FUNC_GRP(13, 2),
+ AMD_PINCTRL_FUNC_GRP(13, 3),
+ AMD_PINCTRL_FUNC_GRP(14, 0),
+ AMD_PINCTRL_FUNC_GRP(14, 1),
+ AMD_PINCTRL_FUNC_GRP(14, 2),
+ AMD_PINCTRL_FUNC_GRP(14, 3),
+ AMD_PINCTRL_FUNC_GRP(15, 0),
+ AMD_PINCTRL_FUNC_GRP(15, 1),
+ AMD_PINCTRL_FUNC_GRP(15, 2),
+ AMD_PINCTRL_FUNC_GRP(15, 3),
+ AMD_PINCTRL_FUNC_GRP(16, 0),
+ AMD_PINCTRL_FUNC_GRP(16, 1),
+ AMD_PINCTRL_FUNC_GRP(16, 2),
+ AMD_PINCTRL_FUNC_GRP(16, 3),
+ AMD_PINCTRL_FUNC_GRP(17, 0),
+ AMD_PINCTRL_FUNC_GRP(17, 1),
+ AMD_PINCTRL_FUNC_GRP(17, 2),
+ AMD_PINCTRL_FUNC_GRP(17, 3),
+ AMD_PINCTRL_FUNC_GRP(18, 0),
+ AMD_PINCTRL_FUNC_GRP(18, 1),
+ AMD_PINCTRL_FUNC_GRP(18, 2),
+ AMD_PINCTRL_FUNC_GRP(18, 3),
+ AMD_PINCTRL_FUNC_GRP(19, 0),
+ AMD_PINCTRL_FUNC_GRP(19, 1),
+ AMD_PINCTRL_FUNC_GRP(19, 2),
+ AMD_PINCTRL_FUNC_GRP(19, 3),
+ AMD_PINCTRL_FUNC_GRP(20, 0),
+ AMD_PINCTRL_FUNC_GRP(20, 1),
+ AMD_PINCTRL_FUNC_GRP(20, 2),
+ AMD_PINCTRL_FUNC_GRP(20, 3),
+ AMD_PINCTRL_FUNC_GRP(21, 0),
+ AMD_PINCTRL_FUNC_GRP(21, 1),
+ AMD_PINCTRL_FUNC_GRP(21, 2),
+ AMD_PINCTRL_FUNC_GRP(21, 3),
+ AMD_PINCTRL_FUNC_GRP(22, 0),
+ AMD_PINCTRL_FUNC_GRP(22, 1),
+ AMD_PINCTRL_FUNC_GRP(22, 2),
+ AMD_PINCTRL_FUNC_GRP(22, 3),
+ AMD_PINCTRL_FUNC_GRP(23, 0),
+ AMD_PINCTRL_FUNC_GRP(23, 1),
+ AMD_PINCTRL_FUNC_GRP(23, 2),
+ AMD_PINCTRL_FUNC_GRP(23, 3),
+ AMD_PINCTRL_FUNC_GRP(24, 0),
+ AMD_PINCTRL_FUNC_GRP(24, 1),
+ AMD_PINCTRL_FUNC_GRP(24, 2),
+ AMD_PINCTRL_FUNC_GRP(24, 3),
+ AMD_PINCTRL_FUNC_GRP(25, 0),
+ AMD_PINCTRL_FUNC_GRP(25, 1),
+ AMD_PINCTRL_FUNC_GRP(25, 2),
+ AMD_PINCTRL_FUNC_GRP(25, 3),
+ AMD_PINCTRL_FUNC_GRP(26, 0),
+ AMD_PINCTRL_FUNC_GRP(26, 1),
+ AMD_PINCTRL_FUNC_GRP(26, 2),
+ AMD_PINCTRL_FUNC_GRP(26, 3),
+ AMD_PINCTRL_FUNC_GRP(27, 0),
+ AMD_PINCTRL_FUNC_GRP(27, 1),
+ AMD_PINCTRL_FUNC_GRP(27, 2),
+ AMD_PINCTRL_FUNC_GRP(27, 3),
+ AMD_PINCTRL_FUNC_GRP(28, 0),
+ AMD_PINCTRL_FUNC_GRP(28, 1),
+ AMD_PINCTRL_FUNC_GRP(28, 2),
+ AMD_PINCTRL_FUNC_GRP(28, 3),
+ AMD_PINCTRL_FUNC_GRP(29, 0),
+ AMD_PINCTRL_FUNC_GRP(29, 1),
+ AMD_PINCTRL_FUNC_GRP(29, 2),
+ AMD_PINCTRL_FUNC_GRP(29, 3),
+ AMD_PINCTRL_FUNC_GRP(30, 0),
+ AMD_PINCTRL_FUNC_GRP(30, 1),
+ AMD_PINCTRL_FUNC_GRP(30, 2),
+ AMD_PINCTRL_FUNC_GRP(30, 3),
+ AMD_PINCTRL_FUNC_GRP(31, 0),
+ AMD_PINCTRL_FUNC_GRP(31, 1),
+ AMD_PINCTRL_FUNC_GRP(31, 2),
+ AMD_PINCTRL_FUNC_GRP(31, 3),
+ AMD_PINCTRL_FUNC_GRP(32, 0),
+ AMD_PINCTRL_FUNC_GRP(32, 1),
+ AMD_PINCTRL_FUNC_GRP(32, 2),
+ AMD_PINCTRL_FUNC_GRP(32, 3),
+ AMD_PINCTRL_FUNC_GRP(33, 0),
+ AMD_PINCTRL_FUNC_GRP(33, 1),
+ AMD_PINCTRL_FUNC_GRP(33, 2),
+ AMD_PINCTRL_FUNC_GRP(33, 3),
+ AMD_PINCTRL_FUNC_GRP(34, 0),
+ AMD_PINCTRL_FUNC_GRP(34, 1),
+ AMD_PINCTRL_FUNC_GRP(34, 2),
+ AMD_PINCTRL_FUNC_GRP(34, 3),
+ AMD_PINCTRL_FUNC_GRP(35, 0),
+ AMD_PINCTRL_FUNC_GRP(35, 1),
+ AMD_PINCTRL_FUNC_GRP(35, 2),
+ AMD_PINCTRL_FUNC_GRP(35, 3),
+ AMD_PINCTRL_FUNC_GRP(36, 0),
+ AMD_PINCTRL_FUNC_GRP(36, 1),
+ AMD_PINCTRL_FUNC_GRP(36, 2),
+ AMD_PINCTRL_FUNC_GRP(36, 3),
+ AMD_PINCTRL_FUNC_GRP(37, 0),
+ AMD_PINCTRL_FUNC_GRP(37, 1),
+ AMD_PINCTRL_FUNC_GRP(37, 2),
+ AMD_PINCTRL_FUNC_GRP(37, 3),
+ AMD_PINCTRL_FUNC_GRP(38, 0),
+ AMD_PINCTRL_FUNC_GRP(38, 1),
+ AMD_PINCTRL_FUNC_GRP(38, 2),
+ AMD_PINCTRL_FUNC_GRP(38, 3),
+ AMD_PINCTRL_FUNC_GRP(39, 0),
+ AMD_PINCTRL_FUNC_GRP(39, 1),
+ AMD_PINCTRL_FUNC_GRP(39, 2),
+ AMD_PINCTRL_FUNC_GRP(39, 3),
+ AMD_PINCTRL_FUNC_GRP(40, 0),
+ AMD_PINCTRL_FUNC_GRP(40, 1),
+ AMD_PINCTRL_FUNC_GRP(40, 2),
+ AMD_PINCTRL_FUNC_GRP(40, 3),
+ AMD_PINCTRL_FUNC_GRP(41, 0),
+ AMD_PINCTRL_FUNC_GRP(41, 1),
+ AMD_PINCTRL_FUNC_GRP(41, 2),
+ AMD_PINCTRL_FUNC_GRP(41, 3),
+ AMD_PINCTRL_FUNC_GRP(42, 0),
+ AMD_PINCTRL_FUNC_GRP(42, 1),
+ AMD_PINCTRL_FUNC_GRP(42, 2),
+ AMD_PINCTRL_FUNC_GRP(42, 3),
+ AMD_PINCTRL_FUNC_GRP(43, 0),
+ AMD_PINCTRL_FUNC_GRP(43, 1),
+ AMD_PINCTRL_FUNC_GRP(43, 2),
+ AMD_PINCTRL_FUNC_GRP(43, 3),
+ AMD_PINCTRL_FUNC_GRP(44, 0),
+ AMD_PINCTRL_FUNC_GRP(44, 1),
+ AMD_PINCTRL_FUNC_GRP(44, 2),
+ AMD_PINCTRL_FUNC_GRP(44, 3),
+ AMD_PINCTRL_FUNC_GRP(45, 0),
+ AMD_PINCTRL_FUNC_GRP(45, 1),
+ AMD_PINCTRL_FUNC_GRP(45, 2),
+ AMD_PINCTRL_FUNC_GRP(45, 3),
+ AMD_PINCTRL_FUNC_GRP(46, 0),
+ AMD_PINCTRL_FUNC_GRP(46, 1),
+ AMD_PINCTRL_FUNC_GRP(46, 2),
+ AMD_PINCTRL_FUNC_GRP(46, 3),
+ AMD_PINCTRL_FUNC_GRP(47, 0),
+ AMD_PINCTRL_FUNC_GRP(47, 1),
+ AMD_PINCTRL_FUNC_GRP(47, 2),
+ AMD_PINCTRL_FUNC_GRP(47, 3),
+ AMD_PINCTRL_FUNC_GRP(48, 0),
+ AMD_PINCTRL_FUNC_GRP(48, 1),
+ AMD_PINCTRL_FUNC_GRP(48, 2),
+ AMD_PINCTRL_FUNC_GRP(48, 3),
+ AMD_PINCTRL_FUNC_GRP(49, 0),
+ AMD_PINCTRL_FUNC_GRP(49, 1),
+ AMD_PINCTRL_FUNC_GRP(49, 2),
+ AMD_PINCTRL_FUNC_GRP(49, 3),
+ AMD_PINCTRL_FUNC_GRP(50, 0),
+ AMD_PINCTRL_FUNC_GRP(50, 1),
+ AMD_PINCTRL_FUNC_GRP(50, 2),
+ AMD_PINCTRL_FUNC_GRP(50, 3),
+ AMD_PINCTRL_FUNC_GRP(51, 0),
+ AMD_PINCTRL_FUNC_GRP(51, 1),
+ AMD_PINCTRL_FUNC_GRP(51, 2),
+ AMD_PINCTRL_FUNC_GRP(51, 3),
+ AMD_PINCTRL_FUNC_GRP(52, 0),
+ AMD_PINCTRL_FUNC_GRP(52, 1),
+ AMD_PINCTRL_FUNC_GRP(52, 2),
+ AMD_PINCTRL_FUNC_GRP(52, 3),
+ AMD_PINCTRL_FUNC_GRP(53, 0),
+ AMD_PINCTRL_FUNC_GRP(53, 1),
+ AMD_PINCTRL_FUNC_GRP(53, 2),
+ AMD_PINCTRL_FUNC_GRP(53, 3),
+ AMD_PINCTRL_FUNC_GRP(54, 0),
+ AMD_PINCTRL_FUNC_GRP(54, 1),
+ AMD_PINCTRL_FUNC_GRP(54, 2),
+ AMD_PINCTRL_FUNC_GRP(54, 3),
+ AMD_PINCTRL_FUNC_GRP(55, 0),
+ AMD_PINCTRL_FUNC_GRP(55, 1),
+ AMD_PINCTRL_FUNC_GRP(55, 2),
+ AMD_PINCTRL_FUNC_GRP(55, 3),
+ AMD_PINCTRL_FUNC_GRP(56, 0),
+ AMD_PINCTRL_FUNC_GRP(56, 1),
+ AMD_PINCTRL_FUNC_GRP(56, 2),
+ AMD_PINCTRL_FUNC_GRP(56, 3),
+ AMD_PINCTRL_FUNC_GRP(57, 0),
+ AMD_PINCTRL_FUNC_GRP(57, 1),
+ AMD_PINCTRL_FUNC_GRP(57, 2),
+ AMD_PINCTRL_FUNC_GRP(57, 3),
+ AMD_PINCTRL_FUNC_GRP(58, 0),
+ AMD_PINCTRL_FUNC_GRP(58, 1),
+ AMD_PINCTRL_FUNC_GRP(58, 2),
+ AMD_PINCTRL_FUNC_GRP(58, 3),
+ AMD_PINCTRL_FUNC_GRP(59, 0),
+ AMD_PINCTRL_FUNC_GRP(59, 1),
+ AMD_PINCTRL_FUNC_GRP(59, 2),
+ AMD_PINCTRL_FUNC_GRP(59, 3),
+ AMD_PINCTRL_FUNC_GRP(60, 0),
+ AMD_PINCTRL_FUNC_GRP(60, 1),
+ AMD_PINCTRL_FUNC_GRP(60, 2),
+ AMD_PINCTRL_FUNC_GRP(60, 3),
+ AMD_PINCTRL_FUNC_GRP(61, 0),
+ AMD_PINCTRL_FUNC_GRP(61, 1),
+ AMD_PINCTRL_FUNC_GRP(61, 2),
+ AMD_PINCTRL_FUNC_GRP(61, 3),
+ AMD_PINCTRL_FUNC_GRP(62, 0),
+ AMD_PINCTRL_FUNC_GRP(62, 1),
+ AMD_PINCTRL_FUNC_GRP(62, 2),
+ AMD_PINCTRL_FUNC_GRP(62, 3),
+ AMD_PINCTRL_FUNC_GRP(64, 0),
+ AMD_PINCTRL_FUNC_GRP(64, 1),
+ AMD_PINCTRL_FUNC_GRP(64, 2),
+ AMD_PINCTRL_FUNC_GRP(64, 3),
+ AMD_PINCTRL_FUNC_GRP(65, 0),
+ AMD_PINCTRL_FUNC_GRP(65, 1),
+ AMD_PINCTRL_FUNC_GRP(65, 2),
+ AMD_PINCTRL_FUNC_GRP(65, 3),
+ AMD_PINCTRL_FUNC_GRP(66, 0),
+ AMD_PINCTRL_FUNC_GRP(66, 1),
+ AMD_PINCTRL_FUNC_GRP(66, 2),
+ AMD_PINCTRL_FUNC_GRP(66, 3),
+ AMD_PINCTRL_FUNC_GRP(67, 0),
+ AMD_PINCTRL_FUNC_GRP(67, 1),
+ AMD_PINCTRL_FUNC_GRP(67, 2),
+ AMD_PINCTRL_FUNC_GRP(67, 3),
+ AMD_PINCTRL_FUNC_GRP(68, 0),
+ AMD_PINCTRL_FUNC_GRP(68, 1),
+ AMD_PINCTRL_FUNC_GRP(68, 2),
+ AMD_PINCTRL_FUNC_GRP(68, 3),
+ AMD_PINCTRL_FUNC_GRP(69, 0),
+ AMD_PINCTRL_FUNC_GRP(69, 1),
+ AMD_PINCTRL_FUNC_GRP(69, 2),
+ AMD_PINCTRL_FUNC_GRP(69, 3),
+ AMD_PINCTRL_FUNC_GRP(70, 0),
+ AMD_PINCTRL_FUNC_GRP(70, 1),
+ AMD_PINCTRL_FUNC_GRP(70, 2),
+ AMD_PINCTRL_FUNC_GRP(70, 3),
+ AMD_PINCTRL_FUNC_GRP(71, 0),
+ AMD_PINCTRL_FUNC_GRP(71, 1),
+ AMD_PINCTRL_FUNC_GRP(71, 2),
+ AMD_PINCTRL_FUNC_GRP(71, 3),
+ AMD_PINCTRL_FUNC_GRP(72, 0),
+ AMD_PINCTRL_FUNC_GRP(72, 1),
+ AMD_PINCTRL_FUNC_GRP(72, 2),
+ AMD_PINCTRL_FUNC_GRP(72, 3),
+ AMD_PINCTRL_FUNC_GRP(73, 0),
+ AMD_PINCTRL_FUNC_GRP(73, 1),
+ AMD_PINCTRL_FUNC_GRP(73, 2),
+ AMD_PINCTRL_FUNC_GRP(73, 3),
+ AMD_PINCTRL_FUNC_GRP(74, 0),
+ AMD_PINCTRL_FUNC_GRP(74, 1),
+ AMD_PINCTRL_FUNC_GRP(74, 2),
+ AMD_PINCTRL_FUNC_GRP(74, 3),
+ AMD_PINCTRL_FUNC_GRP(75, 0),
+ AMD_PINCTRL_FUNC_GRP(75, 1),
+ AMD_PINCTRL_FUNC_GRP(75, 2),
+ AMD_PINCTRL_FUNC_GRP(75, 3),
+ AMD_PINCTRL_FUNC_GRP(76, 0),
+ AMD_PINCTRL_FUNC_GRP(76, 1),
+ AMD_PINCTRL_FUNC_GRP(76, 2),
+ AMD_PINCTRL_FUNC_GRP(76, 3),
+ AMD_PINCTRL_FUNC_GRP(77, 0),
+ AMD_PINCTRL_FUNC_GRP(77, 1),
+ AMD_PINCTRL_FUNC_GRP(77, 2),
+ AMD_PINCTRL_FUNC_GRP(77, 3),
+ AMD_PINCTRL_FUNC_GRP(78, 0),
+ AMD_PINCTRL_FUNC_GRP(78, 1),
+ AMD_PINCTRL_FUNC_GRP(78, 2),
+ AMD_PINCTRL_FUNC_GRP(78, 3),
+ AMD_PINCTRL_FUNC_GRP(79, 0),
+ AMD_PINCTRL_FUNC_GRP(79, 1),
+ AMD_PINCTRL_FUNC_GRP(79, 2),
+ AMD_PINCTRL_FUNC_GRP(79, 3),
+ AMD_PINCTRL_FUNC_GRP(80, 0),
+ AMD_PINCTRL_FUNC_GRP(80, 1),
+ AMD_PINCTRL_FUNC_GRP(80, 2),
+ AMD_PINCTRL_FUNC_GRP(80, 3),
+ AMD_PINCTRL_FUNC_GRP(81, 0),
+ AMD_PINCTRL_FUNC_GRP(81, 1),
+ AMD_PINCTRL_FUNC_GRP(81, 2),
+ AMD_PINCTRL_FUNC_GRP(81, 3),
+ AMD_PINCTRL_FUNC_GRP(82, 0),
+ AMD_PINCTRL_FUNC_GRP(82, 1),
+ AMD_PINCTRL_FUNC_GRP(82, 2),
+ AMD_PINCTRL_FUNC_GRP(82, 3),
+ AMD_PINCTRL_FUNC_GRP(83, 0),
+ AMD_PINCTRL_FUNC_GRP(83, 1),
+ AMD_PINCTRL_FUNC_GRP(83, 2),
+ AMD_PINCTRL_FUNC_GRP(83, 3),
+ AMD_PINCTRL_FUNC_GRP(84, 0),
+ AMD_PINCTRL_FUNC_GRP(84, 1),
+ AMD_PINCTRL_FUNC_GRP(84, 2),
+ AMD_PINCTRL_FUNC_GRP(84, 3),
+ AMD_PINCTRL_FUNC_GRP(85, 0),
+ AMD_PINCTRL_FUNC_GRP(85, 1),
+ AMD_PINCTRL_FUNC_GRP(85, 2),
+ AMD_PINCTRL_FUNC_GRP(85, 3),
+ AMD_PINCTRL_FUNC_GRP(86, 0),
+ AMD_PINCTRL_FUNC_GRP(86, 1),
+ AMD_PINCTRL_FUNC_GRP(86, 2),
+ AMD_PINCTRL_FUNC_GRP(86, 3),
+ AMD_PINCTRL_FUNC_GRP(87, 0),
+ AMD_PINCTRL_FUNC_GRP(87, 1),
+ AMD_PINCTRL_FUNC_GRP(87, 2),
+ AMD_PINCTRL_FUNC_GRP(87, 3),
+ AMD_PINCTRL_FUNC_GRP(88, 0),
+ AMD_PINCTRL_FUNC_GRP(88, 1),
+ AMD_PINCTRL_FUNC_GRP(88, 2),
+ AMD_PINCTRL_FUNC_GRP(88, 3),
+ AMD_PINCTRL_FUNC_GRP(89, 0),
+ AMD_PINCTRL_FUNC_GRP(89, 1),
+ AMD_PINCTRL_FUNC_GRP(89, 2),
+ AMD_PINCTRL_FUNC_GRP(89, 3),
+ AMD_PINCTRL_FUNC_GRP(90, 0),
+ AMD_PINCTRL_FUNC_GRP(90, 1),
+ AMD_PINCTRL_FUNC_GRP(90, 2),
+ AMD_PINCTRL_FUNC_GRP(90, 3),
+ AMD_PINCTRL_FUNC_GRP(91, 0),
+ AMD_PINCTRL_FUNC_GRP(91, 1),
+ AMD_PINCTRL_FUNC_GRP(91, 2),
+ AMD_PINCTRL_FUNC_GRP(91, 3),
+ AMD_PINCTRL_FUNC_GRP(92, 0),
+ AMD_PINCTRL_FUNC_GRP(92, 1),
+ AMD_PINCTRL_FUNC_GRP(92, 2),
+ AMD_PINCTRL_FUNC_GRP(92, 3),
+ AMD_PINCTRL_FUNC_GRP(93, 0),
+ AMD_PINCTRL_FUNC_GRP(93, 1),
+ AMD_PINCTRL_FUNC_GRP(93, 2),
+ AMD_PINCTRL_FUNC_GRP(93, 3),
+ AMD_PINCTRL_FUNC_GRP(94, 0),
+ AMD_PINCTRL_FUNC_GRP(94, 1),
+ AMD_PINCTRL_FUNC_GRP(94, 2),
+ AMD_PINCTRL_FUNC_GRP(94, 3),
+ AMD_PINCTRL_FUNC_GRP(95, 0),
+ AMD_PINCTRL_FUNC_GRP(95, 1),
+ AMD_PINCTRL_FUNC_GRP(95, 2),
+ AMD_PINCTRL_FUNC_GRP(95, 3),
+ AMD_PINCTRL_FUNC_GRP(96, 0),
+ AMD_PINCTRL_FUNC_GRP(96, 1),
+ AMD_PINCTRL_FUNC_GRP(96, 2),
+ AMD_PINCTRL_FUNC_GRP(96, 3),
+ AMD_PINCTRL_FUNC_GRP(97, 0),
+ AMD_PINCTRL_FUNC_GRP(97, 1),
+ AMD_PINCTRL_FUNC_GRP(97, 2),
+ AMD_PINCTRL_FUNC_GRP(97, 3),
+ AMD_PINCTRL_FUNC_GRP(98, 0),
+ AMD_PINCTRL_FUNC_GRP(98, 1),
+ AMD_PINCTRL_FUNC_GRP(98, 2),
+ AMD_PINCTRL_FUNC_GRP(98, 3),
+ AMD_PINCTRL_FUNC_GRP(99, 0),
+ AMD_PINCTRL_FUNC_GRP(99, 1),
+ AMD_PINCTRL_FUNC_GRP(99, 2),
+ AMD_PINCTRL_FUNC_GRP(99, 3),
+ AMD_PINCTRL_FUNC_GRP(100, 0),
+ AMD_PINCTRL_FUNC_GRP(100, 1),
+ AMD_PINCTRL_FUNC_GRP(100, 2),
+ AMD_PINCTRL_FUNC_GRP(100, 3),
+ AMD_PINCTRL_FUNC_GRP(101, 0),
+ AMD_PINCTRL_FUNC_GRP(101, 1),
+ AMD_PINCTRL_FUNC_GRP(101, 2),
+ AMD_PINCTRL_FUNC_GRP(101, 3),
+ AMD_PINCTRL_FUNC_GRP(102, 0),
+ AMD_PINCTRL_FUNC_GRP(102, 1),
+ AMD_PINCTRL_FUNC_GRP(102, 2),
+ AMD_PINCTRL_FUNC_GRP(102, 3),
+ AMD_PINCTRL_FUNC_GRP(103, 0),
+ AMD_PINCTRL_FUNC_GRP(103, 1),
+ AMD_PINCTRL_FUNC_GRP(103, 2),
+ AMD_PINCTRL_FUNC_GRP(103, 3),
+ AMD_PINCTRL_FUNC_GRP(104, 0),
+ AMD_PINCTRL_FUNC_GRP(104, 1),
+ AMD_PINCTRL_FUNC_GRP(104, 2),
+ AMD_PINCTRL_FUNC_GRP(104, 3),
+ AMD_PINCTRL_FUNC_GRP(105, 0),
+ AMD_PINCTRL_FUNC_GRP(105, 1),
+ AMD_PINCTRL_FUNC_GRP(105, 2),
+ AMD_PINCTRL_FUNC_GRP(105, 3),
+ AMD_PINCTRL_FUNC_GRP(106, 0),
+ AMD_PINCTRL_FUNC_GRP(106, 1),
+ AMD_PINCTRL_FUNC_GRP(106, 2),
+ AMD_PINCTRL_FUNC_GRP(106, 3),
+ AMD_PINCTRL_FUNC_GRP(107, 0),
+ AMD_PINCTRL_FUNC_GRP(107, 1),
+ AMD_PINCTRL_FUNC_GRP(107, 2),
+ AMD_PINCTRL_FUNC_GRP(107, 3),
+ AMD_PINCTRL_FUNC_GRP(108, 0),
+ AMD_PINCTRL_FUNC_GRP(108, 1),
+ AMD_PINCTRL_FUNC_GRP(108, 2),
+ AMD_PINCTRL_FUNC_GRP(108, 3),
+ AMD_PINCTRL_FUNC_GRP(109, 0),
+ AMD_PINCTRL_FUNC_GRP(109, 1),
+ AMD_PINCTRL_FUNC_GRP(109, 2),
+ AMD_PINCTRL_FUNC_GRP(109, 3),
+ AMD_PINCTRL_FUNC_GRP(110, 0),
+ AMD_PINCTRL_FUNC_GRP(110, 1),
+ AMD_PINCTRL_FUNC_GRP(110, 2),
+ AMD_PINCTRL_FUNC_GRP(110, 3),
+ AMD_PINCTRL_FUNC_GRP(111, 0),
+ AMD_PINCTRL_FUNC_GRP(111, 1),
+ AMD_PINCTRL_FUNC_GRP(111, 2),
+ AMD_PINCTRL_FUNC_GRP(111, 3),
+ AMD_PINCTRL_FUNC_GRP(112, 0),
+ AMD_PINCTRL_FUNC_GRP(112, 1),
+ AMD_PINCTRL_FUNC_GRP(112, 2),
+ AMD_PINCTRL_FUNC_GRP(112, 3),
+ AMD_PINCTRL_FUNC_GRP(113, 0),
+ AMD_PINCTRL_FUNC_GRP(113, 1),
+ AMD_PINCTRL_FUNC_GRP(113, 2),
+ AMD_PINCTRL_FUNC_GRP(113, 3),
+ AMD_PINCTRL_FUNC_GRP(114, 0),
+ AMD_PINCTRL_FUNC_GRP(114, 1),
+ AMD_PINCTRL_FUNC_GRP(114, 2),
+ AMD_PINCTRL_FUNC_GRP(114, 3),
+ AMD_PINCTRL_FUNC_GRP(115, 0),
+ AMD_PINCTRL_FUNC_GRP(115, 1),
+ AMD_PINCTRL_FUNC_GRP(115, 2),
+ AMD_PINCTRL_FUNC_GRP(115, 3),
+ AMD_PINCTRL_FUNC_GRP(116, 0),
+ AMD_PINCTRL_FUNC_GRP(116, 1),
+ AMD_PINCTRL_FUNC_GRP(116, 2),
+ AMD_PINCTRL_FUNC_GRP(116, 3),
+ AMD_PINCTRL_FUNC_GRP(117, 0),
+ AMD_PINCTRL_FUNC_GRP(117, 1),
+ AMD_PINCTRL_FUNC_GRP(117, 2),
+ AMD_PINCTRL_FUNC_GRP(117, 3),
+ AMD_PINCTRL_FUNC_GRP(118, 0),
+ AMD_PINCTRL_FUNC_GRP(118, 1),
+ AMD_PINCTRL_FUNC_GRP(118, 2),
+ AMD_PINCTRL_FUNC_GRP(118, 3),
+ AMD_PINCTRL_FUNC_GRP(119, 0),
+ AMD_PINCTRL_FUNC_GRP(119, 1),
+ AMD_PINCTRL_FUNC_GRP(119, 2),
+ AMD_PINCTRL_FUNC_GRP(119, 3),
+ AMD_PINCTRL_FUNC_GRP(120, 0),
+ AMD_PINCTRL_FUNC_GRP(120, 1),
+ AMD_PINCTRL_FUNC_GRP(120, 2),
+ AMD_PINCTRL_FUNC_GRP(120, 3),
+ AMD_PINCTRL_FUNC_GRP(121, 0),
+ AMD_PINCTRL_FUNC_GRP(121, 1),
+ AMD_PINCTRL_FUNC_GRP(121, 2),
+ AMD_PINCTRL_FUNC_GRP(121, 3),
+ AMD_PINCTRL_FUNC_GRP(122, 0),
+ AMD_PINCTRL_FUNC_GRP(122, 1),
+ AMD_PINCTRL_FUNC_GRP(122, 2),
+ AMD_PINCTRL_FUNC_GRP(122, 3),
+ AMD_PINCTRL_FUNC_GRP(123, 0),
+ AMD_PINCTRL_FUNC_GRP(123, 1),
+ AMD_PINCTRL_FUNC_GRP(123, 2),
+ AMD_PINCTRL_FUNC_GRP(123, 3),
+ AMD_PINCTRL_FUNC_GRP(124, 0),
+ AMD_PINCTRL_FUNC_GRP(124, 1),
+ AMD_PINCTRL_FUNC_GRP(124, 2),
+ AMD_PINCTRL_FUNC_GRP(124, 3),
+ AMD_PINCTRL_FUNC_GRP(125, 0),
+ AMD_PINCTRL_FUNC_GRP(125, 1),
+ AMD_PINCTRL_FUNC_GRP(125, 2),
+ AMD_PINCTRL_FUNC_GRP(125, 3),
+ AMD_PINCTRL_FUNC_GRP(126, 0),
+ AMD_PINCTRL_FUNC_GRP(126, 1),
+ AMD_PINCTRL_FUNC_GRP(126, 2),
+ AMD_PINCTRL_FUNC_GRP(126, 3),
+ AMD_PINCTRL_FUNC_GRP(127, 0),
+ AMD_PINCTRL_FUNC_GRP(127, 1),
+ AMD_PINCTRL_FUNC_GRP(127, 2),
+ AMD_PINCTRL_FUNC_GRP(127, 3),
+ AMD_PINCTRL_FUNC_GRP(128, 0),
+ AMD_PINCTRL_FUNC_GRP(128, 1),
+ AMD_PINCTRL_FUNC_GRP(128, 2),
+ AMD_PINCTRL_FUNC_GRP(128, 3),
+ AMD_PINCTRL_FUNC_GRP(129, 0),
+ AMD_PINCTRL_FUNC_GRP(129, 1),
+ AMD_PINCTRL_FUNC_GRP(129, 2),
+ AMD_PINCTRL_FUNC_GRP(129, 3),
+ AMD_PINCTRL_FUNC_GRP(130, 0),
+ AMD_PINCTRL_FUNC_GRP(130, 1),
+ AMD_PINCTRL_FUNC_GRP(130, 2),
+ AMD_PINCTRL_FUNC_GRP(130, 3),
+ AMD_PINCTRL_FUNC_GRP(131, 0),
+ AMD_PINCTRL_FUNC_GRP(131, 1),
+ AMD_PINCTRL_FUNC_GRP(131, 2),
+ AMD_PINCTRL_FUNC_GRP(131, 3),
+ AMD_PINCTRL_FUNC_GRP(132, 0),
+ AMD_PINCTRL_FUNC_GRP(132, 1),
+ AMD_PINCTRL_FUNC_GRP(132, 2),
+ AMD_PINCTRL_FUNC_GRP(132, 3),
+ AMD_PINCTRL_FUNC_GRP(133, 0),
+ AMD_PINCTRL_FUNC_GRP(133, 1),
+ AMD_PINCTRL_FUNC_GRP(133, 2),
+ AMD_PINCTRL_FUNC_GRP(133, 3),
+ AMD_PINCTRL_FUNC_GRP(134, 0),
+ AMD_PINCTRL_FUNC_GRP(134, 1),
+ AMD_PINCTRL_FUNC_GRP(134, 2),
+ AMD_PINCTRL_FUNC_GRP(134, 3),
+ AMD_PINCTRL_FUNC_GRP(135, 0),
+ AMD_PINCTRL_FUNC_GRP(135, 1),
+ AMD_PINCTRL_FUNC_GRP(135, 2),
+ AMD_PINCTRL_FUNC_GRP(135, 3),
+ AMD_PINCTRL_FUNC_GRP(136, 0),
+ AMD_PINCTRL_FUNC_GRP(136, 1),
+ AMD_PINCTRL_FUNC_GRP(136, 2),
+ AMD_PINCTRL_FUNC_GRP(136, 3),
+ AMD_PINCTRL_FUNC_GRP(137, 0),
+ AMD_PINCTRL_FUNC_GRP(137, 1),
+ AMD_PINCTRL_FUNC_GRP(137, 2),
+ AMD_PINCTRL_FUNC_GRP(137, 3),
+ AMD_PINCTRL_FUNC_GRP(138, 0),
+ AMD_PINCTRL_FUNC_GRP(138, 1),
+ AMD_PINCTRL_FUNC_GRP(138, 2),
+ AMD_PINCTRL_FUNC_GRP(138, 3),
+ AMD_PINCTRL_FUNC_GRP(139, 0),
+ AMD_PINCTRL_FUNC_GRP(139, 1),
+ AMD_PINCTRL_FUNC_GRP(139, 2),
+ AMD_PINCTRL_FUNC_GRP(139, 3),
+ AMD_PINCTRL_FUNC_GRP(140, 0),
+ AMD_PINCTRL_FUNC_GRP(140, 1),
+ AMD_PINCTRL_FUNC_GRP(140, 2),
+ AMD_PINCTRL_FUNC_GRP(140, 3),
+ AMD_PINCTRL_FUNC_GRP(141, 0),
+ AMD_PINCTRL_FUNC_GRP(141, 1),
+ AMD_PINCTRL_FUNC_GRP(141, 2),
+ AMD_PINCTRL_FUNC_GRP(141, 3),
+ AMD_PINCTRL_FUNC_GRP(142, 0),
+ AMD_PINCTRL_FUNC_GRP(142, 1),
+ AMD_PINCTRL_FUNC_GRP(142, 2),
+ AMD_PINCTRL_FUNC_GRP(142, 3),
+ AMD_PINCTRL_FUNC_GRP(143, 0),
+ AMD_PINCTRL_FUNC_GRP(143, 1),
+ AMD_PINCTRL_FUNC_GRP(143, 2),
+ AMD_PINCTRL_FUNC_GRP(143, 3),
+ AMD_PINCTRL_FUNC_GRP(144, 0),
+ AMD_PINCTRL_FUNC_GRP(144, 1),
+ AMD_PINCTRL_FUNC_GRP(144, 2),
+ AMD_PINCTRL_FUNC_GRP(144, 3),
+
+ PINCTRL_PINGROUP("i2c0", AMD_PINS(145, 146), 2),
+ PINCTRL_PINGROUP("i2c1", AMD_PINS(147, 148), 2),
+ PINCTRL_PINGROUP("i2c2", AMD_PINS(113, 114), 2),
+ PINCTRL_PINGROUP("i2c3", AMD_PINS(19, 20), 2),
+ PINCTRL_PINGROUP("uart0", AMD_PINS(135, 136, 137, 138, 139), 5),
+ PINCTRL_PINGROUP("uart1", AMD_PINS(140, 141, 142, 143, 144), 5),
+};
-static const unsigned uart0_pins[] = {135, 136, 137, 138, 139};
-static const unsigned uart1_pins[] = {140, 141, 142, 143, 144};
+#define AMD_PMUX_FUNC(_number) { \
+ .name = "iomux_gpio_"#_number, \
+ .groups = { \
+ "IMX_F0_GPIO"#_number, "IMX_F1_GPIO"#_number, \
+ "IMX_F2_GPIO"#_number, "IMX_F3_GPIO"#_number, \
+ }, \
+ .index = _number, \
+ .ngroups = NSELECTS, \
+}
-static const struct amd_pingroup kerncz_groups[] = {
- {
- .name = "i2c0",
- .pins = i2c0_pins,
- .npins = 2,
- },
- {
- .name = "i2c1",
- .pins = i2c1_pins,
- .npins = 2,
- },
- {
- .name = "i2c2",
- .pins = i2c2_pins,
- .npins = 2,
- },
- {
- .name = "i2c3",
- .pins = i2c3_pins,
- .npins = 2,
- },
- {
- .name = "uart0",
- .pins = uart0_pins,
- .npins = 5,
- },
- {
- .name = "uart1",
- .pins = uart1_pins,
- .npins = 5,
- },
+static const struct amd_function pmx_functions[] = {
+ AMD_PMUX_FUNC(0),
+ AMD_PMUX_FUNC(1),
+ AMD_PMUX_FUNC(2),
+ AMD_PMUX_FUNC(3),
+ AMD_PMUX_FUNC(4),
+ AMD_PMUX_FUNC(5),
+ AMD_PMUX_FUNC(6),
+ AMD_PMUX_FUNC(7),
+ AMD_PMUX_FUNC(8),
+ AMD_PMUX_FUNC(9),
+ AMD_PMUX_FUNC(10),
+ AMD_PMUX_FUNC(11),
+ AMD_PMUX_FUNC(12),
+ AMD_PMUX_FUNC(13),
+ AMD_PMUX_FUNC(14),
+ AMD_PMUX_FUNC(15),
+ AMD_PMUX_FUNC(16),
+ AMD_PMUX_FUNC(17),
+ AMD_PMUX_FUNC(18),
+ AMD_PMUX_FUNC(19),
+ AMD_PMUX_FUNC(20),
+ AMD_PMUX_FUNC(21),
+ AMD_PMUX_FUNC(22),
+ AMD_PMUX_FUNC(23),
+ AMD_PMUX_FUNC(24),
+ AMD_PMUX_FUNC(25),
+ AMD_PMUX_FUNC(26),
+ AMD_PMUX_FUNC(27),
+ AMD_PMUX_FUNC(28),
+ AMD_PMUX_FUNC(29),
+ AMD_PMUX_FUNC(30),
+ AMD_PMUX_FUNC(31),
+ AMD_PMUX_FUNC(32),
+ AMD_PMUX_FUNC(33),
+ AMD_PMUX_FUNC(34),
+ AMD_PMUX_FUNC(35),
+ AMD_PMUX_FUNC(36),
+ AMD_PMUX_FUNC(37),
+ AMD_PMUX_FUNC(38),
+ AMD_PMUX_FUNC(39),
+ AMD_PMUX_FUNC(40),
+ AMD_PMUX_FUNC(41),
+ AMD_PMUX_FUNC(42),
+ AMD_PMUX_FUNC(43),
+ AMD_PMUX_FUNC(44),
+ AMD_PMUX_FUNC(45),
+ AMD_PMUX_FUNC(46),
+ AMD_PMUX_FUNC(47),
+ AMD_PMUX_FUNC(48),
+ AMD_PMUX_FUNC(49),
+ AMD_PMUX_FUNC(50),
+ AMD_PMUX_FUNC(51),
+ AMD_PMUX_FUNC(52),
+ AMD_PMUX_FUNC(53),
+ AMD_PMUX_FUNC(54),
+ AMD_PMUX_FUNC(55),
+ AMD_PMUX_FUNC(56),
+ AMD_PMUX_FUNC(57),
+ AMD_PMUX_FUNC(58),
+ AMD_PMUX_FUNC(59),
+ AMD_PMUX_FUNC(60),
+ AMD_PMUX_FUNC(61),
+ AMD_PMUX_FUNC(62),
+ AMD_PMUX_FUNC(64),
+ AMD_PMUX_FUNC(65),
+ AMD_PMUX_FUNC(66),
+ AMD_PMUX_FUNC(67),
+ AMD_PMUX_FUNC(68),
+ AMD_PMUX_FUNC(69),
+ AMD_PMUX_FUNC(70),
+ AMD_PMUX_FUNC(71),
+ AMD_PMUX_FUNC(72),
+ AMD_PMUX_FUNC(73),
+ AMD_PMUX_FUNC(74),
+ AMD_PMUX_FUNC(75),
+ AMD_PMUX_FUNC(76),
+ AMD_PMUX_FUNC(77),
+ AMD_PMUX_FUNC(78),
+ AMD_PMUX_FUNC(79),
+ AMD_PMUX_FUNC(80),
+ AMD_PMUX_FUNC(81),
+ AMD_PMUX_FUNC(82),
+ AMD_PMUX_FUNC(83),
+ AMD_PMUX_FUNC(84),
+ AMD_PMUX_FUNC(85),
+ AMD_PMUX_FUNC(86),
+ AMD_PMUX_FUNC(87),
+ AMD_PMUX_FUNC(88),
+ AMD_PMUX_FUNC(89),
+ AMD_PMUX_FUNC(90),
+ AMD_PMUX_FUNC(91),
+ AMD_PMUX_FUNC(92),
+ AMD_PMUX_FUNC(93),
+ AMD_PMUX_FUNC(94),
+ AMD_PMUX_FUNC(95),
+ AMD_PMUX_FUNC(96),
+ AMD_PMUX_FUNC(97),
+ AMD_PMUX_FUNC(98),
+ AMD_PMUX_FUNC(99),
+ AMD_PMUX_FUNC(100),
+ AMD_PMUX_FUNC(101),
+ AMD_PMUX_FUNC(102),
+ AMD_PMUX_FUNC(103),
+ AMD_PMUX_FUNC(104),
+ AMD_PMUX_FUNC(105),
+ AMD_PMUX_FUNC(106),
+ AMD_PMUX_FUNC(107),
+ AMD_PMUX_FUNC(108),
+ AMD_PMUX_FUNC(109),
+ AMD_PMUX_FUNC(110),
+ AMD_PMUX_FUNC(111),
+ AMD_PMUX_FUNC(112),
+ AMD_PMUX_FUNC(113),
+ AMD_PMUX_FUNC(114),
+ AMD_PMUX_FUNC(115),
+ AMD_PMUX_FUNC(116),
+ AMD_PMUX_FUNC(117),
+ AMD_PMUX_FUNC(118),
+ AMD_PMUX_FUNC(119),
+ AMD_PMUX_FUNC(120),
+ AMD_PMUX_FUNC(121),
+ AMD_PMUX_FUNC(122),
+ AMD_PMUX_FUNC(123),
+ AMD_PMUX_FUNC(124),
+ AMD_PMUX_FUNC(125),
+ AMD_PMUX_FUNC(126),
+ AMD_PMUX_FUNC(127),
+ AMD_PMUX_FUNC(128),
+ AMD_PMUX_FUNC(129),
+ AMD_PMUX_FUNC(130),
+ AMD_PMUX_FUNC(131),
+ AMD_PMUX_FUNC(132),
+ AMD_PMUX_FUNC(133),
+ AMD_PMUX_FUNC(134),
+ AMD_PMUX_FUNC(135),
+ AMD_PMUX_FUNC(136),
+ AMD_PMUX_FUNC(137),
+ AMD_PMUX_FUNC(138),
+ AMD_PMUX_FUNC(139),
+ AMD_PMUX_FUNC(140),
+ AMD_PMUX_FUNC(141),
+ AMD_PMUX_FUNC(142),
+ AMD_PMUX_FUNC(143),
+ AMD_PMUX_FUNC(144),
};
#endif
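The bulk of the header change above is mechanical: AMD_PINCTRL_FUNC_GRP(n, f) emits one single-pin group per (pin, function) pair using the generic PINCTRL_PINGROUP() initializer from <linux/pinctrl/pinctrl.h>. Roughly, the first entry expands to:

/* AMD_PINCTRL_FUNC_GRP(0, 0) expands (roughly) to: */
[IMX_F0_GPIO0] = PINCTRL_PINGROUP("IMX_F0_GPIO0",
				  (const unsigned int []){ 0 }, 1),
/*
 * i.e. a struct pingroup with
 *	.name = "IMX_F0_GPIO0", .pins = { 0 }, .npins = 1
 */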
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 517f2a6330ad..82b921fd630d 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -237,8 +237,6 @@ static void atmel_gpio_irq_unmask(struct irq_data *d)
BIT(pin->line));
}
-#ifdef CONFIG_PM_SLEEP
-
static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
@@ -255,9 +253,6 @@ static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-#else
-#define atmel_gpio_irq_set_wake NULL
-#endif /* CONFIG_PM_SLEEP */
static struct irq_chip atmel_gpio_irq_chip = {
.name = "GPIO",
@@ -265,7 +260,7 @@ static struct irq_chip atmel_gpio_irq_chip = {
.irq_mask = atmel_gpio_irq_mask,
.irq_unmask = atmel_gpio_irq_unmask,
.irq_set_type = atmel_gpio_irq_set_type,
- .irq_set_wake = atmel_gpio_irq_set_wake,
+ .irq_set_wake = pm_sleep_ptr(atmel_gpio_irq_set_wake),
};
static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
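The at91-pio4 change above trades the CONFIG_PM_SLEEP ifdeffery for pm_sleep_ptr(): the callback is now always compiled (so it cannot bit-rot in !PM_SLEEP builds), and the macro substitutes NULL when CONFIG_PM_SLEEP is off, letting dead-code elimination drop the unused function. A minimal sketch of the idiom:

#include <linux/irq.h>
#include <linux/pm.h>

static int example_irq_set_wake(struct irq_data *d, unsigned int on)
{
	/* always compiled; constant-folded away when CONFIG_PM_SLEEP=n */
	return 0;
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_set_wake	= pm_sleep_ptr(example_irq_set_wake),
};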
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d91a010e65f5..5634fa063ebf 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1615,8 +1615,6 @@ static void gpio_irq_ack(struct irq_data *d)
/* the interrupt is already cleared before by reading ISR */
}
-#ifdef CONFIG_PM
-
static u32 wakeups[MAX_GPIO_BANKS];
static u32 backups[MAX_GPIO_BANKS];
@@ -1683,10 +1681,6 @@ void at91_pinctrl_gpio_resume(void)
}
}
-#else
-#define gpio_irq_set_wake NULL
-#endif /* CONFIG_PM */
-
static void gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1741,14 +1735,14 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
gpio_irqchip->irq_disable = gpio_irq_mask;
gpio_irqchip->irq_mask = gpio_irq_mask;
gpio_irqchip->irq_unmask = gpio_irq_unmask;
- gpio_irqchip->irq_set_wake = gpio_irq_set_wake;
+ gpio_irqchip->irq_set_wake = pm_ptr(gpio_irq_set_wake);
gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type;
/* Disable irqs of this PIO controller */
writel_relaxed(~0, at91_gpio->regbase + PIO_IDR);
/*
- * Let the generic code handle this edge IRQ, the the chained
+ * Let the generic code handle this edge IRQ, the chained
* handler will perform the actual work of handling the parent
* interrupt.
*/
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index 207cbae3a7bf..7ab20ac15391 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -73,7 +73,7 @@ static const struct pinctrl_pin_desc axp209_pins[] = {
PINCTRL_PIN(2, "GPIO2"),
};
-static const struct pinctrl_pin_desc axp813_pins[] = {
+static const struct pinctrl_pin_desc axp22x_pins[] = {
PINCTRL_PIN(0, "GPIO0"),
PINCTRL_PIN(1, "GPIO1"),
};
@@ -87,9 +87,16 @@ static const struct axp20x_pctrl_desc axp20x_data = {
.adc_mux = AXP20X_MUX_ADC,
};
+static const struct axp20x_pctrl_desc axp22x_data = {
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 0,
+};
+
static const struct axp20x_pctrl_desc axp813_data = {
- .pins = axp813_pins,
- .npins = ARRAY_SIZE(axp813_pins),
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
.ldo_mask = BIT(0) | BIT(1),
.adc_mask = BIT(0),
.gpio_status_offset = 0,
@@ -388,6 +395,7 @@ static int axp20x_build_funcs_groups(struct platform_device *pdev)
static const struct of_device_id axp20x_pctl_match[] = {
{ .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp221-gpio", .data = &axp22x_data, },
{ .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
{ }
};
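The new axp221 entry reuses the usual pattern of hanging per-variant data off the of_device_id table. A hedged sketch of how such data is typically fetched at probe time (the function name below is hypothetical; of_device_get_match_data() itself is the real API from <linux/of_device.h>):

    static int axp20x_pctl_probe_sketch(struct platform_device *pdev)
    {
    	const struct axp20x_pctrl_desc *desc;

    	/* Returns the .data member of the matched of_device_id entry */
    	desc = of_device_get_match_data(&pdev->dev);
    	if (!desc)
    		return -ENODEV;

    	/* desc->pins/desc->npins now pick the axp209/axp22x/axp813 layout */
    	return 0;
    }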
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 1ca11616db74..3a9ee9c8af11 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -21,6 +21,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include "core.h"
@@ -135,7 +136,6 @@ struct ingenic_pinctrl {
struct ingenic_gpio_chip {
struct ingenic_pinctrl *jzpc;
struct gpio_chip gc;
- struct irq_chip irq_chip;
unsigned int irq, reg_base;
};
@@ -3393,7 +3393,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true);
@@ -3405,7 +3405,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false);
@@ -3417,7 +3417,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ gpiochip_enable_irq(gc, irq);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
@@ -3433,7 +3435,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
ingenic_gpio_irq_mask(irqd);
@@ -3443,13 +3445,15 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false);
+
+ gpiochip_disable_irq(gc, irq);
}
static void ingenic_gpio_irq_ack(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
bool high;
if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) &&
@@ -3477,6 +3481,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
@@ -3498,12 +3503,12 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
* best we can do is to set up a single-edge interrupt and then
* switch to the opposing edge when ACKing the interrupt.
*/
- bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
+ bool high = ingenic_gpio_get_value(jzgc, irq);
type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
}
- irq_set_type(jzgc, irqd->hwirq, type);
+ irq_set_type(jzgc, irq, type);
return 0;
}
@@ -3668,22 +3673,45 @@ static const struct pinctrl_ops ingenic_pctlops = {
static int ingenic_gpio_irq_request(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
int ret;
- ret = ingenic_gpio_direction_input(gpio_chip, data->hwirq);
+ ret = ingenic_gpio_direction_input(gpio_chip, irq);
if (ret)
return ret;
- return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+ return gpiochip_reqres_irq(gpio_chip, irq);
}
static void ingenic_gpio_irq_release(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
+
+ return gpiochip_relres_irq(gpio_chip, irq);
+}
+
+static void ingenic_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
- return gpiochip_relres_irq(gpio_chip, data->hwirq);
+ seq_printf(p, "%s", gpio_chip->label);
}
+static const struct irq_chip ingenic_gpio_irqchip = {
+ .irq_enable = ingenic_gpio_irq_enable,
+ .irq_disable = ingenic_gpio_irq_disable,
+ .irq_unmask = ingenic_gpio_irq_unmask,
+ .irq_mask = ingenic_gpio_irq_mask,
+ .irq_ack = ingenic_gpio_irq_ack,
+ .irq_set_type = ingenic_gpio_irq_set_type,
+ .irq_set_wake = ingenic_gpio_irq_set_wake,
+ .irq_request_resources = ingenic_gpio_irq_request,
+ .irq_release_resources = ingenic_gpio_irq_release,
+ .irq_print_chip = ingenic_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+};
+
static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
int pin, int func)
{
@@ -4172,20 +4200,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
if (!jzgc->irq)
return -EINVAL;
- jzgc->irq_chip.name = jzgc->gc.label;
- jzgc->irq_chip.irq_enable = ingenic_gpio_irq_enable;
- jzgc->irq_chip.irq_disable = ingenic_gpio_irq_disable;
- jzgc->irq_chip.irq_unmask = ingenic_gpio_irq_unmask;
- jzgc->irq_chip.irq_mask = ingenic_gpio_irq_mask;
- jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
- jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
- jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
- jzgc->irq_chip.irq_request_resources = ingenic_gpio_irq_request;
- jzgc->irq_chip.irq_release_resources = ingenic_gpio_irq_release;
- jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
-
girq = &jzgc->gc.irq;
- girq->chip = &jzgc->irq_chip;
+ gpio_irq_chip_set_chip(girq, &ingenic_gpio_irqchip);
girq->parent_handler = ingenic_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
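This is the "immutable irqchip" conversion: the per-bank struct irq_chip embedded in ingenic_gpio_chip is replaced by a single const chip registered via gpio_irq_chip_set_chip(), the enable/disable paths now bracket the hardware accesses with gpiochip_enable_irq()/gpiochip_disable_irq(), and .irq_print_chip stands in for the old per-bank .name (a const chip cannot carry gc->label). A minimal sketch of the shape with illustrative names; drivers that do not need custom request/release callbacks (ingenic does, to force the pin to input first) can use the stock helper macro instead:

    static const struct irq_chip example_irqchip = {
    	.name		= "example",
    	.irq_mask	= example_irq_mask,
    	.irq_unmask	= example_irq_unmask,
    	.flags		= IRQCHIP_IMMUTABLE,
    	GPIOCHIP_IRQ_RESOURCE_HELPERS,	/* stock reqres/relres callbacks */
    };

    	/* at probe time, instead of girq->chip = &per_instance_chip: */
    	gpio_irq_chip_set_chip(&gc->irq, &example_irqchip);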
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 771dd1f4fbe0..c5fd154990c8 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1944,6 +1944,7 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{ .compatible = "microchip,lan966x-pinctrl", .data = &lan966x_desc },
{},
};
+MODULE_DEVICE_TABLE(of, ocelot_pinctrl_of_match);
static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
const struct ocelot_pinctrl *info)
@@ -2050,4 +2051,5 @@ static struct platform_driver ocelot_pinctrl_driver = {
},
.probe = ocelot_pinctrl_probe,
};
-builtin_platform_driver(ocelot_pinctrl_driver);
+module_platform_driver(ocelot_pinctrl_driver);
+MODULE_LICENSE("Dual MIT/GPL");
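Together with the MODULE_DEVICE_TABLE() addition above, switching from builtin_platform_driver() to module_platform_driver() lets the driver be built as a loadable module and autoloaded when a matching DT node appears. module_platform_driver() is roughly shorthand for (simplified):

    static int __init ocelot_pinctrl_driver_init(void)
    {
    	return platform_driver_register(&ocelot_pinctrl_driver);
    }
    module_init(ocelot_pinctrl_driver_init);

    static void __exit ocelot_pinctrl_driver_exit(void)
    {
    	platform_driver_unregister(&ocelot_pinctrl_driver);
    }
    module_exit(ocelot_pinctrl_driver_exit);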
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 2a86c1035cc8..3eb40e230d98 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -207,6 +207,7 @@ struct starfive_pinctrl {
void __iomem *base;
void __iomem *padctl;
struct pinctrl_dev *pctl;
+ struct mutex mutex; /* serialize adding groups and functions */
};
static inline unsigned int starfive_pin_to_gpio(const struct starfive_pinctrl *sfp,
@@ -522,6 +523,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
+ mutex_lock(&sfp->mutex);
for_each_child_of_node(np, child) {
int npins;
int i;
@@ -615,12 +617,14 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
*maps = map;
*num_maps = nmaps;
+ mutex_unlock(&sfp->mutex);
return 0;
put_child:
of_node_put(child);
free_map:
pinctrl_utils_free_map(pctldev, map, nmaps);
+ mutex_unlock(&sfp->mutex);
return ret;
}
@@ -1267,6 +1271,7 @@ static int starfive_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
sfp->gc.parent = dev;
raw_spin_lock_init(&sfp->lock);
+ mutex_init(&sfp->mutex);
ret = devm_pinctrl_register_and_init(dev, &starfive_desc, sfp, &sfp->pctl);
if (ret)
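The new mutex serializes concurrent dt_node_to_map() calls, which append to the driver's shared group and function tables; a sleeping mutex (rather than the driver's existing raw spinlock) fits here because the map-building path allocates memory. Reduced to its shape, with build_maps() as a hypothetical stand-in for the loop above:

    mutex_lock(&sfp->mutex);
    ret = build_maps(sfp, np);	/* hypothetical; may sleep and allocate */
    mutex_unlock(&sfp->mutex);	/* dropped on success and error paths alike */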
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index e14012209992..7d2fbf8a02cd 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -163,6 +163,8 @@ static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
* @num_groups: Number of function groups.
*
* Get function's group count and group names.
+ *
+ * Return: 0
*/
static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
@@ -410,6 +412,10 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_ENABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
case PIN_CONFIG_MODE_LOW_POWER:
/*
* These cases are mentioned in dts but configurable
@@ -418,6 +424,11 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
*/
ret = 0;
break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_DISABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
default:
dev_warn(pctldev->dev,
"unsupported configuration parameter '%u'\n",
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3daeb9772391..f415c13caae0 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -113,6 +113,14 @@ config PINCTRL_MSM8X74
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8974 platform.
+config PINCTRL_MSM8909
+ tristate "Qualcomm 8909 pin controller driver"
+ depends on OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8909 platform.
+
config PINCTRL_MSM8916
tristate "Qualcomm 8916 pin controller driver"
depends on OF
@@ -320,6 +328,15 @@ config PINCTRL_SM6350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM6350 platform.
+config PINCTRL_SM6375
+ tristate "Qualcomm Technologies Inc SM6375 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM6375 platform.
+
config PINCTRL_SDX65
tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
depends on GPIOLIB && OF
@@ -367,7 +384,7 @@ config PINCTRL_SM8350
config PINCTRL_SM8450
tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
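Note the SM8450 entry changing from "select PINCTRL_MSM" to "depends on PINCTRL_MSM", matching the new MSM8909 and SM6375 entries. The practical difference between the two Kconfig idioms, sketched:

    # select: forces PINCTRL_MSM on whenever this symbol is enabled
    # depends on: requires PINCTRL_MSM first, so the shared core can
    #             itself remain tristate and be built as a module
    config PINCTRL_SM8450
    	tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
    	depends on PINCTRL_MSM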
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 4f0ee7597f81..fbd64853a24d 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PINCTRL_MSM8226) += pinctrl-msm8226.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
+obj-$(CONFIG_PINCTRL_MSM8909) += pinctrl-msm8909.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o
+obj-$(CONFIG_PINCTRL_SM6375) += pinctrl-sm6375.o
obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 74810ec4df44..e97ce45b6d53 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -401,7 +401,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
"Slew resource not provided\n");
- if (data->is_clk_optional)
+ if (of_property_read_bool(dev->of_node, "qcom,adsp-bypass-mode"))
ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
else
ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
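The clock-optional decision moves from a static per-variant flag (removed from the header just below) to a runtime device-tree check. of_property_read_bool() simply reports whether the property exists on the node:

    /* true if the node carries the flag property; no value is required */
    bool bypass = of_property_read_bool(dev->of_node,
    				    "qcom,adsp-bypass-mode");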
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index 759d5d8da562..afbac2a6c82c 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -77,7 +77,6 @@ struct lpi_pinctrl_variant_data {
int ngroups;
const struct lpi_function *functions;
int nfunctions;
- bool is_clk_optional;
};
int lpi_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
new file mode 100644
index 000000000000..6dd15b910632
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2022, Kernkonzept GmbH.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc msm8909_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "SDC1_CLK"),
+ PINCTRL_PIN(114, "SDC1_CMD"),
+ PINCTRL_PIN(115, "SDC1_DATA"),
+ PINCTRL_PIN(116, "SDC2_CLK"),
+ PINCTRL_PIN(117, "SDC2_CMD"),
+ PINCTRL_PIN(118, "SDC2_DATA"),
+ PINCTRL_PIN(119, "QDSD_CLK"),
+ PINCTRL_PIN(120, "QDSD_CMD"),
+ PINCTRL_PIN(121, "QDSD_DATA0"),
+ PINCTRL_PIN(122, "QDSD_DATA1"),
+ PINCTRL_PIN(123, "QDSD_DATA2"),
+ PINCTRL_PIN(124, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+
+static const unsigned int sdc1_clk_pins[] = { 113 };
+static const unsigned int sdc1_cmd_pins[] = { 114 };
+static const unsigned int sdc1_data_pins[] = { 115 };
+static const unsigned int sdc2_clk_pins[] = { 116 };
+static const unsigned int sdc2_cmd_pins[] = { 117 };
+static const unsigned int sdc2_data_pins[] = { 118 };
+static const unsigned int qdsd_clk_pins[] = { 119 };
+static const unsigned int qdsd_cmd_pins[] = { 120 };
+static const unsigned int qdsd_data0_pins[] = { 121 };
+static const unsigned int qdsd_data1_pins[] = { 122 };
+static const unsigned int qdsd_data2_pins[] = { 123 };
+static const unsigned int qdsd_data3_pins[] = { 124 };
+
+enum msm8909_functions {
+ msm_mux_gpio,
+ msm_mux_adsp_ext,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac,
+ msm_mux_atest_gpsadc0,
+ msm_mux_atest_gpsadc1,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi1_cs1,
+ msm_mux_blsp_spi1_cs2,
+ msm_mux_blsp_spi1_cs3,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi2_cs1,
+ msm_mux_blsp_spi2_cs2,
+ msm_mux_blsp_spi2_cs3,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi3_cs1,
+ msm_mux_blsp_spi3_cs2,
+ msm_mux_blsp_spi3_cs3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uim1,
+ msm_mux_blsp_uim2,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cdc_pdm0,
+ msm_mux_dbg_out,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_ext_lpass,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gsm0_tx,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_mdp_vsync,
+ msm_mux_modem_tsync,
+ msm_mux_nav_pps,
+ msm_mux_nav_tsync,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s_data0_a,
+ msm_mux_pri_mi2s_data0_b,
+ msm_mux_pri_mi2s_data1_a,
+ msm_mux_pri_mi2s_data1_b,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_sck_a,
+ msm_mux_pri_mi2s_sck_b,
+ msm_mux_pri_mi2s_ws_a,
+ msm_mux_pri_mi2s_ws_b,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sec_mi2s,
+ msm_mux_smb_int,
+ msm_mux_ssbi0,
+ msm_mux_ssbi1,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim3_clk,
+ msm_mux_uim3_data,
+ msm_mux_uim3_present,
+ msm_mux_uim3_reset,
+ msm_mux_uim_batt,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux__,
+};
+
+static const char * const adsp_ext_groups[] = { "gpio38" };
+static const char * const atest_bbrx0_groups[] = { "gpio37" };
+static const char * const atest_bbrx1_groups[] = { "gpio36" };
+static const char * const atest_char0_groups[] = { "gpio62" };
+static const char * const atest_char1_groups[] = { "gpio61" };
+static const char * const atest_char2_groups[] = { "gpio60" };
+static const char * const atest_char3_groups[] = { "gpio59" };
+static const char * const atest_char_groups[] = { "gpio63" };
+static const char * const atest_combodac_groups[] = {
+ "gpio32", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio47", "gpio48", "gpio66", "gpio81", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio94", "gpio95", "gpio110"
+};
+static const char * const atest_gpsadc0_groups[] = { "gpio65" };
+static const char * const atest_gpsadc1_groups[] = { "gpio79" };
+static const char * const atest_wlan0_groups[] = { "gpio96" };
+static const char * const atest_wlan1_groups[] = { "gpio97" };
+static const char * const bimc_dte0_groups[] = { "gpio6", "gpio59" };
+static const char * const bimc_dte1_groups[] = { "gpio7", "gpio60" };
+static const char * const blsp_i2c1_groups[] = { "gpio6", "gpio7" };
+static const char * const blsp_i2c2_groups[] = { "gpio111", "gpio112" };
+static const char * const blsp_i2c3_groups[] = { "gpio29", "gpio30" };
+static const char * const blsp_i2c4_groups[] = { "gpio14", "gpio15" };
+static const char * const blsp_i2c5_groups[] = { "gpio18", "gpio19" };
+static const char * const blsp_i2c6_groups[] = { "gpio10", "gpio11" };
+static const char * const blsp_spi1_cs1_groups[] = { "gpio97" };
+static const char * const blsp_spi1_cs2_groups[] = { "gpio37" };
+static const char * const blsp_spi1_cs3_groups[] = { "gpio65" };
+static const char * const blsp_spi1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_spi2_cs1_groups[] = { "gpio98" };
+static const char * const blsp_spi2_cs2_groups[] = { "gpio17" };
+static const char * const blsp_spi2_cs3_groups[] = { "gpio5" };
+static const char * const blsp_spi2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_spi3_cs1_groups[] = { "gpio95" };
+static const char * const blsp_spi3_cs2_groups[] = { "gpio65" };
+static const char * const blsp_spi3_cs3_groups[] = { "gpio4" };
+static const char * const blsp_spi3_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19"
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_uim1_groups[] = { "gpio4", "gpio5" };
+static const char * const blsp_uim2_groups[] = { "gpio20", "gpio21" };
+static const char * const cam_mclk_groups[] = { "gpio26", "gpio27" };
+static const char * const cci_async_groups[] = { "gpio33" };
+static const char * const cci_timer0_groups[] = { "gpio31" };
+static const char * const cci_timer1_groups[] = { "gpio32" };
+static const char * const cci_timer2_groups[] = { "gpio38" };
+static const char * const cdc_pdm0_groups[] = {
+ "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio64"
+};
+static const char * const dbg_out_groups[] = { "gpio10" };
+static const char * const dmic0_clk_groups[] = { "gpio4" };
+static const char * const dmic0_data_groups[] = { "gpio5" };
+static const char * const ebi0_wrcdc_groups[] = { "gpio64" };
+static const char * const ebi2_a_groups[] = { "gpio99" };
+static const char * const ebi2_lcd_groups[] = {
+ "gpio24", "gpio24", "gpio25", "gpio95"
+};
+static const char * const ext_lpass_groups[] = { "gpio45" };
+static const char * const gcc_gp1_clk_a_groups[] = { "gpio49" };
+static const char * const gcc_gp1_clk_b_groups[] = { "gpio14" };
+static const char * const gcc_gp2_clk_a_groups[] = { "gpio50" };
+static const char * const gcc_gp2_clk_b_groups[] = { "gpio12" };
+static const char * const gcc_gp3_clk_a_groups[] = { "gpio51" };
+static const char * const gcc_gp3_clk_b_groups[] = { "gpio13" };
+static const char * const gcc_plltest_groups[] = { "gpio66", "gpio67" };
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112"
+};
+static const char * const gsm0_tx_groups[] = { "gpio85" };
+static const char * const ldo_en_groups[] = { "gpio99" };
+static const char * const ldo_update_groups[] = { "gpio98" };
+static const char * const m_voc_groups[] = { "gpio8", "gpio95" };
+static const char * const mdp_vsync_groups[] = { "gpio24", "gpio25" };
+static const char * const modem_tsync_groups[] = { "gpio83" };
+static const char * const nav_pps_groups[] = { "gpio83" };
+static const char * const nav_tsync_groups[] = { "gpio83" };
+static const char * const pa_indicator_groups[] = { "gpio82" };
+static const char * const pbs0_groups[] = { "gpio90" };
+static const char * const pbs1_groups[] = { "gpio91" };
+static const char * const pbs2_groups[] = { "gpio92" };
+static const char * const pri_mi2s_data0_a_groups[] = { "gpio62" };
+static const char * const pri_mi2s_data0_b_groups[] = { "gpio95" };
+static const char * const pri_mi2s_data1_a_groups[] = { "gpio63" };
+static const char * const pri_mi2s_data1_b_groups[] = { "gpio96" };
+static const char * const pri_mi2s_mclk_a_groups[] = { "gpio59" };
+static const char * const pri_mi2s_mclk_b_groups[] = { "gpio98" };
+static const char * const pri_mi2s_sck_a_groups[] = { "gpio60" };
+static const char * const pri_mi2s_sck_b_groups[] = { "gpio94" };
+static const char * const pri_mi2s_ws_a_groups[] = { "gpio61" };
+static const char * const pri_mi2s_ws_b_groups[] = { "gpio110" };
+static const char * const prng_rosc_groups[] = { "gpio43" };
+static const char * const pwr_crypto_enabled_a_groups[] = { "gpio35" };
+static const char * const pwr_crypto_enabled_b_groups[] = { "gpio96" };
+static const char * const pwr_modem_enabled_a_groups[] = { "gpio28" };
+static const char * const pwr_modem_enabled_b_groups[] = { "gpio94" };
+static const char * const pwr_nav_enabled_a_groups[] = { "gpio34" };
+static const char * const pwr_nav_enabled_b_groups[] = { "gpio95" };
+static const char * const qdss_cti_trig_in_a0_groups[] = { "gpio20" };
+static const char * const qdss_cti_trig_in_a1_groups[] = { "gpio49" };
+static const char * const qdss_cti_trig_in_b0_groups[] = { "gpio21" };
+static const char * const qdss_cti_trig_in_b1_groups[] = { "gpio50" };
+static const char * const qdss_cti_trig_out_a0_groups[] = { "gpio23" };
+static const char * const qdss_cti_trig_out_a1_groups[] = { "gpio52" };
+static const char * const qdss_cti_trig_out_b0_groups[] = { "gpio22" };
+static const char * const qdss_cti_trig_out_b1_groups[] = { "gpio51" };
+static const char * const qdss_traceclk_a_groups[] = { "gpio46" };
+static const char * const qdss_tracectl_a_groups[] = { "gpio45" };
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio47", "gpio48", "gpio58", "gpio65", "gpio94", "gpio96",
+ "gpio97"
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio14", "gpio16", "gpio17", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio93"
+};
+static const char * const sd_write_groups[] = { "gpio99" };
+static const char * const sec_mi2s_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio98"
+};
+static const char * const smb_int_groups[] = { "gpio58" };
+static const char * const ssbi0_groups[] = { "gpio88" };
+static const char * const ssbi1_groups[] = { "gpio89" };
+static const char * const uim1_clk_groups[] = { "gpio54" };
+static const char * const uim1_data_groups[] = { "gpio53" };
+static const char * const uim1_present_groups[] = { "gpio56" };
+static const char * const uim1_reset_groups[] = { "gpio55" };
+static const char * const uim2_clk_groups[] = { "gpio50" };
+static const char * const uim2_data_groups[] = { "gpio49" };
+static const char * const uim2_present_groups[] = { "gpio52" };
+static const char * const uim2_reset_groups[] = { "gpio51" };
+static const char * const uim3_clk_groups[] = { "gpio23" };
+static const char * const uim3_data_groups[] = { "gpio20" };
+static const char * const uim3_present_groups[] = { "gpio21" };
+static const char * const uim3_reset_groups[] = { "gpio22" };
+static const char * const uim_batt_groups[] = { "gpio57" };
+static const char * const wcss_bt_groups[] = { "gpio39", "gpio47", "gpio48" };
+static const char * const wcss_fm_groups[] = { "gpio45", "gpio46" };
+static const char * const wcss_wlan_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"
+};
+
+static const struct msm_function msm8909_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_combodac),
+ FUNCTION(atest_gpsadc0),
+ FUNCTION(atest_gpsadc1),
+ FUNCTION(atest_wlan0),
+ FUNCTION(atest_wlan1),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi1_cs1),
+ FUNCTION(blsp_spi1_cs2),
+ FUNCTION(blsp_spi1_cs3),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi2_cs1),
+ FUNCTION(blsp_spi2_cs2),
+ FUNCTION(blsp_spi2_cs3),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi3_cs1),
+ FUNCTION(blsp_spi3_cs2),
+ FUNCTION(blsp_spi3_cs3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(dbg_out),
+ FUNCTION(dmic0_clk),
+ FUNCTION(dmic0_data),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(ext_lpass),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gpio),
+ FUNCTION(gsm0_tx),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(mdp_vsync),
+ FUNCTION(modem_tsync),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_tsync),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pbs2),
+ FUNCTION(pri_mi2s_data0_a),
+ FUNCTION(pri_mi2s_data0_b),
+ FUNCTION(pri_mi2s_data1_a),
+ FUNCTION(pri_mi2s_data1_b),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_sck_a),
+ FUNCTION(pri_mi2s_sck_b),
+ FUNCTION(pri_mi2s_ws_a),
+ FUNCTION(pri_mi2s_ws_b),
+ FUNCTION(prng_rosc),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(sd_write),
+ FUNCTION(sec_mi2s),
+ FUNCTION(smb_int),
+ FUNCTION(ssbi0),
+ FUNCTION(ssbi1),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim3_clk),
+ FUNCTION(uim3_data),
+ FUNCTION(uim3_present),
+ FUNCTION(uim3_reset),
+ FUNCTION(uim_batt),
+ FUNCTION(wcss_bt),
+ FUNCTION(wcss_fm),
+ FUNCTION(wcss_wlan),
+};
+
+static const struct msm_pingroup msm8909_groups[] = {
+ PINGROUP(0, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(1, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(2, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(3, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi3_cs3, dmic0_clk, _, _, _, _),
+ PINGROUP(5, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi2_cs3, dmic0_data, _, _, _, _),
+ PINGROUP(6, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte0),
+ PINGROUP(7, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte1),
+ PINGROUP(8, blsp_spi6, m_voc, _, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(9, blsp_spi6, _, _, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(10, blsp_spi6, blsp_i2c6, dbg_out, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(11, blsp_spi6, blsp_i2c6, _, _, _, _, _, _, _),
+ PINGROUP(12, blsp_spi4, gcc_gp2_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(13, blsp_spi4, gcc_gp3_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(14, blsp_spi4, blsp_i2c4, gcc_gp1_clk_b, _, _, _, _, _, qdss_tracedata_b),
+ PINGROUP(15, blsp_spi4, blsp_i2c4, _, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(17, blsp_spi5, blsp_spi2_cs2, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(18, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(20, uim3_data, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_a0, _, _, _),
+ PINGROUP(21, uim3_present, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_b0, _, _, _),
+ PINGROUP(22, uim3_reset, _, qdss_cti_trig_out_b0, _, _, _, _, _, _),
+ PINGROUP(23, uim3_clk, qdss_cti_trig_out_a0, _, _, _, _, _, _, _),
+ PINGROUP(24, mdp_vsync, ebi2_lcd, ebi2_lcd, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, ebi2_lcd, _, _, _, _, _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(28, _, pwr_modem_enabled_a, _, _, _, _, _, _, _),
+ PINGROUP(29, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(30, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(31, cci_timer0, _, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(32, cci_timer1, _, qdss_tracedata_b, _, atest_combodac, _, _, _, _),
+ PINGROUP(33, cci_async, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(34, pwr_nav_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(35, pwr_crypto_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(36, qdss_tracedata_b, _, atest_bbrx1, _, _, _, _, _, _),
+ PINGROUP(37, blsp_spi1_cs2, qdss_tracedata_b, _, atest_bbrx0, _, _, _, _, _),
+ PINGROUP(38, cci_timer2, adsp_ext, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(39, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(40, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(41, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(42, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(43, wcss_wlan, prng_rosc, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(44, wcss_wlan, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(45, wcss_fm, ext_lpass, qdss_tracectl_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(46, wcss_fm, qdss_traceclk_a, _, _, _, _, _, _, _),
+ PINGROUP(47, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(48, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(49, uim2_data, gcc_gp1_clk_a, qdss_cti_trig_in_a1, _, _, _, _, _, _),
+ PINGROUP(50, uim2_clk, gcc_gp2_clk_a, qdss_cti_trig_in_b1, _, _, _, _, _, _),
+ PINGROUP(51, uim2_reset, gcc_gp3_clk_a, qdss_cti_trig_out_b1, _, _, _, _, _, _),
+ PINGROUP(52, uim2_present, qdss_cti_trig_out_a1, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(58, qdss_tracedata_a, smb_int, _, _, _, _, _, _, _),
+ PINGROUP(59, cdc_pdm0, pri_mi2s_mclk_a, atest_char3, _, _, _, _, _, bimc_dte0),
+ PINGROUP(60, cdc_pdm0, pri_mi2s_sck_a, atest_char2, _, _, _, _, _, bimc_dte1),
+ PINGROUP(61, cdc_pdm0, pri_mi2s_ws_a, atest_char1, _, _, _, _, _, _),
+ PINGROUP(62, cdc_pdm0, pri_mi2s_data0_a, atest_char0, _, _, _, _, _, _),
+ PINGROUP(63, cdc_pdm0, pri_mi2s_data1_a, atest_char, _, _, _, _, _, _),
+ PINGROUP(64, cdc_pdm0, _, _, _, _, _, ebi0_wrcdc, _, _),
+ PINGROUP(65, blsp_spi3_cs2, blsp_spi1_cs3, qdss_tracedata_a, _, atest_gpsadc0, _, _, _, _),
+ PINGROUP(66, _, gcc_plltest, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(67, _, gcc_plltest, _, _, _, _, _, _, _),
+ PINGROUP(68, _, _, _, _, _, _, _, _, _),
+ PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ PINGROUP(75, _, _, _, _, _, _, _, _, _),
+ PINGROUP(76, _, _, _, _, _, _, _, _, _),
+ PINGROUP(77, _, _, _, _, _, _, _, _, _),
+ PINGROUP(78, _, _, _, _, _, _, _, _, _),
+ PINGROUP(79, _, _, atest_gpsadc1, _, _, _, _, _, _),
+ PINGROUP(80, _, _, _, _, _, _, _, _, _),
+ PINGROUP(81, _, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(82, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(83, _, modem_tsync, nav_tsync, nav_pps, _, atest_combodac, _, _, _),
+ PINGROUP(84, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(85, gsm0_tx, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(86, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ PINGROUP(88, _, ssbi0, _, _, _, _, _, _, _),
+ PINGROUP(89, _, ssbi1, _, _, _, _, _, _, _),
+ PINGROUP(90, pbs0, _, _, _, _, _, _, _, _),
+ PINGROUP(91, pbs1, _, _, _, _, _, _, _, _),
+ PINGROUP(92, pbs2, _, _, _, _, _, _, _, _),
+ PINGROUP(93, qdss_tracedata_b, _, _, _, _, _, _, _, _),
+ PINGROUP(94, pri_mi2s_sck_b, pwr_modem_enabled_b, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(95, blsp_spi3_cs1, pri_mi2s_data0_b, ebi2_lcd, m_voc, pwr_nav_enabled_b, _, atest_combodac, _, _),
+ PINGROUP(96, pri_mi2s_data1_b, _, pwr_crypto_enabled_b, qdss_tracedata_a, _, atest_wlan0, _, _, _),
+ PINGROUP(97, blsp_spi1_cs1, qdss_tracedata_a, _, atest_wlan1, _, _, _, _, _),
+ PINGROUP(98, sec_mi2s, pri_mi2s_mclk_b, blsp_spi2_cs1, ldo_update, _, _, _, _, _),
+ PINGROUP(99, ebi2_a, sd_write, ldo_en, _, _, _, _, _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, pri_mi2s_ws_b, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(111, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ PINGROUP(112, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_gpio_wakeirq_map msm8909_mpm_map[] = {
+ { 65, 3 }, { 5, 4 }, { 11, 5 }, { 12, 6 }, { 64, 7 }, { 58, 8 },
+ { 50, 9 }, { 13, 10 }, { 49, 11 }, { 20, 12 }, { 21, 13 }, { 25, 14 },
+ { 46, 15 }, { 45, 16 }, { 28, 17 }, { 44, 18 }, { 31, 19 }, { 43, 20 },
+ { 42, 21 }, { 34, 22 }, { 35, 23 }, { 36, 24 }, { 37, 25 }, { 38, 26 },
+ { 39, 27 }, { 40, 28 }, { 41, 29 }, { 90, 30 }, { 91, 32 }, { 92, 33 },
+ { 94, 34 }, { 95, 35 }, { 96, 36 }, { 97, 37 }, { 98, 38 },
+ { 110, 39 }, { 111, 40 }, { 112, 41 }, { 105, 42 }, { 107, 43 },
+ { 47, 50 }, { 48, 51 },
+};
+
+static const struct msm_pinctrl_soc_data msm8909_pinctrl = {
+ .pins = msm8909_pins,
+ .npins = ARRAY_SIZE(msm8909_pins),
+ .functions = msm8909_functions,
+ .nfunctions = ARRAY_SIZE(msm8909_functions),
+ .groups = msm8909_groups,
+ .ngroups = ARRAY_SIZE(msm8909_groups),
+ .ngpios = 113,
+ .wakeirq_map = msm8909_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(msm8909_mpm_map),
+};
+
+static int msm8909_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8909_pinctrl);
+}
+
+static const struct of_device_id msm8909_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8909-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8909_pinctrl_of_match);
+
+static struct platform_driver msm8909_pinctrl_driver = {
+ .driver = {
+ .name = "msm8909-pinctrl",
+ .of_match_table = msm8909_pinctrl_of_match,
+ },
+ .probe = msm8909_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8909_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8909_pinctrl_driver);
+}
+arch_initcall(msm8909_pinctrl_init);
+
+static void __exit msm8909_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8909_pinctrl_driver);
+}
+module_exit(msm8909_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm MSM8909 TLMM pinctrl driver");
+MODULE_LICENSE("GPL");
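A board would enable the new driver through a TLMM node matching the compatible registered above. An illustrative fragment only; the unit address, register size and interrupt below are assumptions chosen to make the example concrete, not values taken from this patch:

    tlmm: pinctrl@1000000 {
    	compatible = "qcom,msm8909-tlmm";
    	reg = <0x01000000 0x300000>;	/* assumed; check the DT binding */
    	interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;	/* assumed */
    	gpio-controller;
    	#gpio-cells = <2>;
    	interrupt-controller;
    	#interrupt-cells = <2>;
    	gpio-ranges = <&tlmm 0 0 113>;	/* 113 GPIOs, per .ngpios above */
    };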
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 396db12ae904..bf68913ba821 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -844,8 +844,8 @@ static const struct msm_pingroup msm8916_groups[] = {
PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
- PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
index 2add9a4520c2..d615b6c55b89 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
@@ -141,7 +141,6 @@ static const struct lpi_pinctrl_variant_data sc7280_lpi_data = {
.ngroups = ARRAY_SIZE(sc7280_groups),
.functions = sc7280_functions,
.nfunctions = ARRAY_SIZE(sc7280_functions),
- .is_clk_optional = true,
};
static const struct of_device_id lpi_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
new file mode 100644
index 000000000000..1138e683e6f4
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -0,0 +1,1544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = REG_SIZE * id + 0x4, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8, \
+ .intr_status_reg = REG_SIZE * id + 0xc, \
+ .intr_target_reg = REG_SIZE * id + 0x8, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm6375_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "UFS_RESET"),
+ PINCTRL_PIN(157, "SDC1_RCLK"),
+ PINCTRL_PIN(158, "SDC1_CLK"),
+ PINCTRL_PIN(159, "SDC1_CMD"),
+ PINCTRL_PIN(160, "SDC1_DATA"),
+ PINCTRL_PIN(161, "SDC2_CLK"),
+ PINCTRL_PIN(162, "SDC2_CMD"),
+ PINCTRL_PIN(163, "SDC2_DATA"),
+};
+
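+/* Describe each GPIO as a single-pin array, referenced by PINGROUP() above */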
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+
+static const unsigned int sdc1_rclk_pins[] = { 157 };
+static const unsigned int sdc1_clk_pins[] = { 158 };
+static const unsigned int sdc1_cmd_pins[] = { 159 };
+static const unsigned int sdc1_data_pins[] = { 160 };
+static const unsigned int sdc2_clk_pins[] = { 161 };
+static const unsigned int sdc2_cmd_pins[] = { 162 };
+static const unsigned int sdc2_data_pins[] = { 163 };
+static const unsigned int ufs_reset_pins[] = { 156 };
+
+enum sm6375_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_tsens,
+ msm_mux_atest_tsens2,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb10,
+ msm_mux_atest_usb11,
+ msm_mux_atest_usb12,
+ msm_mux_atest_usb13,
+ msm_mux_atest_usb2,
+ msm_mux_atest_usb20,
+ msm_mux_atest_usb21,
+ msm_mux_atest_usb22,
+ msm_mux_atest_usb23,
+ msm_mux_audio_ref,
+ msm_mux_btfm_slimbus,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_edp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gp_pdm0,
+ msm_mux_gp_pdm1,
+ msm_mux_gp_pdm2,
+ msm_mux_gpio,
+ msm_mux_gps_tx,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_ext,
+ msm_mux_m_voc,
+ msm_mux_mclk,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s_0,
+ msm_mux_mi2s_1,
+ msm_mux_mi2s_2,
+ msm_mux_mss_lte,
+ msm_mux_nav_gpio,
+ msm_mux_nav_pps,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag0,
+ msm_mux_phase_flag1,
+ msm_mux_phase_flag10,
+ msm_mux_phase_flag11,
+ msm_mux_phase_flag12,
+ msm_mux_phase_flag13,
+ msm_mux_phase_flag14,
+ msm_mux_phase_flag15,
+ msm_mux_phase_flag16,
+ msm_mux_phase_flag17,
+ msm_mux_phase_flag18,
+ msm_mux_phase_flag19,
+ msm_mux_phase_flag2,
+ msm_mux_phase_flag20,
+ msm_mux_phase_flag21,
+ msm_mux_phase_flag22,
+ msm_mux_phase_flag23,
+ msm_mux_phase_flag24,
+ msm_mux_phase_flag25,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag3,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_phase_flag4,
+ msm_mux_phase_flag5,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag7,
+ msm_mux_phase_flag8,
+ msm_mux_phase_flag9,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qdss_gpio0,
+ msm_mux_qdss_gpio1,
+ msm_mux_qdss_gpio10,
+ msm_mux_qdss_gpio11,
+ msm_mux_qdss_gpio12,
+ msm_mux_qdss_gpio13,
+ msm_mux_qdss_gpio14,
+ msm_mux_qdss_gpio15,
+ msm_mux_qdss_gpio2,
+ msm_mux_qdss_gpio3,
+ msm_mux_qdss_gpio4,
+ msm_mux_qdss_gpio5,
+ msm_mux_qdss_gpio6,
+ msm_mux_qdss_gpio7,
+ msm_mux_qdss_gpio8,
+ msm_mux_qdss_gpio9,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qup00,
+ msm_mux_qup01,
+ msm_mux_qup02,
+ msm_mux_qup10,
+ msm_mux_qup11_f1,
+ msm_mux_qup11_f2,
+ msm_mux_qup12,
+ msm_mux_qup13_f1,
+ msm_mux_qup13_f2,
+ msm_mux_qup14,
+ msm_mux_sd_write,
+ msm_mux_sdc1_tb,
+ msm_mux_sdc2_tb,
+ msm_mux_sp_cmu,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux_wlan1_adc0,
+ msm_mux_wlan1_adc1,
+ msm_mux_wlan2_adc0,
+ msm_mux_wlan2_adc1,
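+	/* Sentinel: '_' placeholders in PINGROUP() paste to msm_mux__ */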
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
+ "gpio51", "gpio52", "gpio53", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68", "gpio69", "gpio75", "gpio76", "gpio77", "gpio78",
+ "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", "gpio85",
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", "gpio92",
+ "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105",
+ "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", "gpio111",
+ "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", "gpio117",
+ "gpio118", "gpio119", "gpio120", "gpio124", "gpio125", "gpio126",
+ "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", "gpio132",
+ "gpio133", "gpio134", "gpio135", "gpio136", "gpio141", "gpio142",
+ "gpio143", "gpio150", "gpio151", "gpio152", "gpio153", "gpio154",
+ "gpio155",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio89",
+};
+static const char * const cci_async_groups[] = {
+ "gpio35", "gpio36", "gpio48", "gpio52", "gpio53",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio2", "gpio3", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44",
+};
+static const char * const gps_tx_groups[] = {
+ "gpio101", "gpio102", "gpio107", "gpio108",
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio37", "gpio68",
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio8", "gpio52",
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio57",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio90",
+};
+static const char * const mclk_groups[] = {
+ "gpio93",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio6", "gpio23", "gpio24", "gpio27", "gpio28",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio65", "gpio66",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio101", "gpio101", "gpio102", "gpio102",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio27",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio103",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio106",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio124",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio87",
+};
+static const char * const atest_char_groups[] = {
+ "gpio95",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio96",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio97",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio98",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio99",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio92",
+};
+static const char * const atest_tsens2_groups[] = {
+ "gpio93",
+};
+static const char * const atest_usb1_groups[] = {
+ "gpio83",
+};
+static const char * const atest_usb10_groups[] = {
+ "gpio84",
+};
+static const char * const atest_usb11_groups[] = {
+ "gpio85",
+};
+static const char * const atest_usb12_groups[] = {
+ "gpio86",
+};
+static const char * const atest_usb13_groups[] = {
+ "gpio87",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio88",
+};
+static const char * const atest_usb20_groups[] = {
+ "gpio89",
+};
+static const char * const atest_usb21_groups[] = {
+ "gpio90",
+};
+static const char * const atest_usb22_groups[] = {
+ "gpio91",
+};
+static const char * const atest_usb23_groups[] = {
+ "gpio92",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio60",
+};
+static const char * const btfm_slimbus_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio34",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio35",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio36",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio37",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio0", "gpio1", "gpio2",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio3",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio19", "gpio20", "gpio21", "gpio22",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio86", "gpio90",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio87", "gpio91",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio88", "gpio92",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio89", "gpio93",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio12", "gpio118",
+};
+static const char * const edp_lcd_groups[] = {
+ "gpio23",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio48", "gpio58",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio21",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio22",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio0", "gpio1",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio95",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio96",
+};
+static const char * const lpass_ext_groups[] = {
+ "gpio60", "gpio93",
+};
+static const char * const m_voc_groups[] = {
+ "gpio12",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio47",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio48",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio56",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio57",
+};
+static const char * const mi2s_0_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+static const char * const mi2s_1_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const mi2s_2_groups[] = {
+ "gpio60",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio101", "gpio102",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio118",
+};
+static const char * const phase_flag0_groups[] = {
+ "gpio12",
+};
+static const char * const phase_flag1_groups[] = {
+ "gpio17",
+};
+static const char * const phase_flag10_groups[] = {
+ "gpio41",
+};
+static const char * const phase_flag11_groups[] = {
+ "gpio42",
+};
+static const char * const phase_flag12_groups[] = {
+ "gpio43",
+};
+static const char * const phase_flag13_groups[] = {
+ "gpio44",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio45",
+};
+static const char * const phase_flag15_groups[] = {
+ "gpio46",
+};
+static const char * const phase_flag16_groups[] = {
+ "gpio47",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio48",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio49",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio50",
+};
+static const char * const phase_flag2_groups[] = {
+ "gpio18",
+};
+static const char * const phase_flag20_groups[] = {
+ "gpio51",
+};
+static const char * const phase_flag21_groups[] = {
+ "gpio52",
+};
+static const char * const phase_flag22_groups[] = {
+ "gpio53",
+};
+static const char * const phase_flag23_groups[] = {
+ "gpio56",
+};
+static const char * const phase_flag24_groups[] = {
+ "gpio57",
+};
+static const char * const phase_flag25_groups[] = {
+ "gpio60",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio61",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio62",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio63",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio64",
+};
+static const char * const phase_flag3_groups[] = {
+ "gpio34",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio67",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio68",
+};
+static const char * const phase_flag4_groups[] = {
+ "gpio35",
+};
+static const char * const phase_flag5_groups[] = {
+ "gpio36",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio37",
+};
+static const char * const phase_flag7_groups[] = {
+ "gpio38",
+};
+static const char * const phase_flag8_groups[] = {
+ "gpio39",
+};
+static const char * const phase_flag9_groups[] = {
+ "gpio40",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio13",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio98",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio14",
+};
+static const char * const prng_rosc0_groups[] = {
+ "gpio97",
+};
+static const char * const prng_rosc1_groups[] = {
+ "gpio98",
+};
+static const char * const prng_rosc2_groups[] = {
+ "gpio99",
+};
+static const char * const prng_rosc3_groups[] = {
+ "gpio100",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio2", "gpio3", "gpio6", "gpio7", "gpio61", "gpio62", "gpio86",
+ "gpio87",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio8", "gpio9", "gpio63", "gpio64",
+};
+static const char * const qdss_gpio0_groups[] = {
+ "gpio39", "gpio65",
+};
+static const char * const qdss_gpio1_groups[] = {
+ "gpio40", "gpio66",
+};
+static const char * const qdss_gpio10_groups[] = {
+ "gpio50", "gpio56",
+};
+static const char * const qdss_gpio11_groups[] = {
+ "gpio51", "gpio57",
+};
+static const char * const qdss_gpio12_groups[] = {
+ "gpio34", "gpio52",
+};
+static const char * const qdss_gpio13_groups[] = {
+ "gpio35", "gpio53",
+};
+static const char * const qdss_gpio14_groups[] = {
+ "gpio27", "gpio36",
+};
+static const char * const qdss_gpio15_groups[] = {
+ "gpio28", "gpio37",
+};
+static const char * const qdss_gpio2_groups[] = {
+ "gpio38", "gpio41",
+};
+static const char * const qdss_gpio3_groups[] = {
+ "gpio42", "gpio47",
+};
+static const char * const qdss_gpio4_groups[] = {
+ "gpio43", "gpio88",
+};
+static const char * const qdss_gpio5_groups[] = {
+ "gpio44", "gpio89",
+};
+static const char * const qdss_gpio6_groups[] = {
+ "gpio45", "gpio90",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio46", "gpio91",
+};
+static const char * const qdss_gpio8_groups[] = {
+ "gpio48", "gpio92",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio49", "gpio93",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio105",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio104",
+};
+static const char * const qlink1_enable_groups[] = {
+ "gpio108",
+};
+static const char * const qlink1_request_groups[] = {
+ "gpio107",
+};
+static const char * const qup00_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup01_groups[] = {
+ "gpio61", "gpio62", "gpio63", "gpio64",
+};
+static const char * const qup02_groups[] = {
+ "gpio45", "gpio46", "gpio48", "gpio56", "gpio57",
+};
+static const char * const qup10_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup11_f1_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup11_f2_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup12_groups[] = {
+ "gpio19", "gpio19", "gpio20", "gpio20",
+};
+static const char * const qup13_f1_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup13_f2_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup14_groups[] = {
+ "gpio4", "gpio4", "gpio5", "gpio5",
+};
+static const char * const sd_write_groups[] = {
+ "gpio85",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio4",
+};
+static const char * const sdc2_tb_groups[] = {
+ "gpio5",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio3",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio61",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio62",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio63",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio64",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio88",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio88",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio80",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio79",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio82",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio81",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio76",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio75",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio78",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio77",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio47",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio49",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio89",
+};
+static const char * const wlan1_adc0_groups[] = {
+ "gpio90",
+};
+static const char * const wlan1_adc1_groups[] = {
+ "gpio92",
+};
+static const char * const wlan2_adc0_groups[] = {
+ "gpio91",
+};
+static const char * const wlan2_adc1_groups[] = {
+ "gpio93",
+};
+
+static const struct msm_function sm6375_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_tsens2),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb10),
+ FUNCTION(atest_usb11),
+ FUNCTION(atest_usb12),
+ FUNCTION(atest_usb13),
+ FUNCTION(atest_usb2),
+ FUNCTION(atest_usb20),
+ FUNCTION(atest_usb21),
+ FUNCTION(atest_usb22),
+ FUNCTION(atest_usb23),
+ FUNCTION(audio_ref),
+ FUNCTION(btfm_slimbus),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cri_trng),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(edp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gpio),
+ FUNCTION(gps_tx),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(lpass_ext),
+ FUNCTION(m_voc),
+ FUNCTION(mclk),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s_0),
+ FUNCTION(mi2s_1),
+ FUNCTION(mi2s_2),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_gpio),
+ FUNCTION(nav_pps),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag0),
+ FUNCTION(phase_flag1),
+ FUNCTION(phase_flag10),
+ FUNCTION(phase_flag11),
+ FUNCTION(phase_flag12),
+ FUNCTION(phase_flag13),
+ FUNCTION(phase_flag14),
+ FUNCTION(phase_flag15),
+ FUNCTION(phase_flag16),
+ FUNCTION(phase_flag17),
+ FUNCTION(phase_flag18),
+ FUNCTION(phase_flag19),
+ FUNCTION(phase_flag2),
+ FUNCTION(phase_flag20),
+ FUNCTION(phase_flag21),
+ FUNCTION(phase_flag22),
+ FUNCTION(phase_flag23),
+ FUNCTION(phase_flag24),
+ FUNCTION(phase_flag25),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag3),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(phase_flag4),
+ FUNCTION(phase_flag5),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag7),
+ FUNCTION(phase_flag8),
+ FUNCTION(phase_flag9),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qdss_gpio0),
+ FUNCTION(qdss_gpio1),
+ FUNCTION(qdss_gpio10),
+ FUNCTION(qdss_gpio11),
+ FUNCTION(qdss_gpio12),
+ FUNCTION(qdss_gpio13),
+ FUNCTION(qdss_gpio14),
+ FUNCTION(qdss_gpio15),
+ FUNCTION(qdss_gpio2),
+ FUNCTION(qdss_gpio3),
+ FUNCTION(qdss_gpio4),
+ FUNCTION(qdss_gpio5),
+ FUNCTION(qdss_gpio6),
+ FUNCTION(qdss_gpio7),
+ FUNCTION(qdss_gpio8),
+ FUNCTION(qdss_gpio9),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qup00),
+ FUNCTION(qup01),
+ FUNCTION(qup02),
+ FUNCTION(qup10),
+ FUNCTION(qup11_f1),
+ FUNCTION(qup11_f2),
+ FUNCTION(qup12),
+ FUNCTION(qup13_f1),
+ FUNCTION(qup13_f2),
+ FUNCTION(qup14),
+ FUNCTION(sd_write),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sdc2_tb),
+ FUNCTION(sp_cmu),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(wlan2_adc1),
+};
+
+/*
+ * Every pin is maintained as a single group; missing or non-existent pins
+ * are represented by dummy groups so the pin group index stays in sync
+ * with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm6375_groups[] = {
+ [0] = PINGROUP(0, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [1] = PINGROUP(1, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup00, cci_i2c, cri_trng, qdss_cti, _, _, _, _, _),
+ [3] = PINGROUP(3, qup00, cci_i2c, sp_cmu, dbg_out, qdss_cti, _, _, _, _),
+ [4] = PINGROUP(4, qup14, qup14, sdc1_tb, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup14, qup14, sdc2_tb, _, _, _, _, _, _),
+ [6] = PINGROUP(6, mdp_vsync, qdss_cti, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qdss_cti, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, gp_pdm1, qdss_gpio, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qdss_gpio, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, m_voc, dp_hot, _, phase_flag0, _, _, _, _, _),
+ [13] = PINGROUP(13, qup10, pll_bypassnl, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup10, pll_reset, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup10, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup10, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, _, phase_flag1, qup10, _, _, _, _, _, _),
+ [18] = PINGROUP(18, _, phase_flag2, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [21] = PINGROUP(21, gcc_gp2, ddr_bist, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, gcc_gp3, ddr_bist, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, mdp_vsync, edp_lcd, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup11_f1, qup11_f2, mdp_vsync, pll_bist, _, qdss_gpio14, _, _, _),
+ [28] = PINGROUP(28, qup11_f1, qup11_f2, mdp_vsync, _, qdss_gpio15, _, _, _, _),
+ [29] = PINGROUP(29, cam_mclk, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, cam_mclk, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, cam_mclk, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, cam_mclk, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, cam_mclk, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, cci_timer0, _, phase_flag3, qdss_gpio12, _, _, _, _, _),
+ [35] = PINGROUP(35, cci_timer1, cci_async, _, phase_flag4, qdss_gpio13, _, _, _, _),
+ [36] = PINGROUP(36, cci_timer2, cci_async, _, phase_flag5, qdss_gpio14, _, _, _, _),
+ [37] = PINGROUP(37, cci_timer3, gp_pdm0, _, phase_flag6, qdss_gpio15, _, _, _, _),
+ [38] = PINGROUP(38, cci_timer4, _, phase_flag7, qdss_gpio2, _, _, _, _, _),
+ [39] = PINGROUP(39, cci_i2c, _, phase_flag8, qdss_gpio0, _, _, _, _, _),
+ [40] = PINGROUP(40, cci_i2c, _, phase_flag9, qdss_gpio1, _, _, _, _, _),
+ [41] = PINGROUP(41, cci_i2c, _, phase_flag10, qdss_gpio2, _, _, _, _, _),
+ [42] = PINGROUP(42, cci_i2c, _, phase_flag11, qdss_gpio3, _, _, _, _, _),
+ [43] = PINGROUP(43, cci_i2c, _, phase_flag12, qdss_gpio4, _, _, _, _, _),
+ [44] = PINGROUP(44, cci_i2c, _, phase_flag13, qdss_gpio5, _, _, _, _, _),
+ [45] = PINGROUP(45, qup02, _, phase_flag14, qdss_gpio6, _, _, _, _, _),
+ [46] = PINGROUP(46, qup02, _, phase_flag15, qdss_gpio7, _, _, _, _, _),
+ [47] = PINGROUP(47, mdp_vsync0, _, phase_flag16, qdss_gpio3, _, _, usb2phy_ac, _, _),
+ [48] = PINGROUP(48, cci_async, mdp_vsync1, gcc_gp1, _, phase_flag17, qdss_gpio8, qup02,
+ _, _),
+ [49] = PINGROUP(49, vfr_1, _, phase_flag18, qdss_gpio9, _, _, _, _, _),
+ [50] = PINGROUP(50, _, phase_flag19, qdss_gpio10, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, phase_flag20, qdss_gpio11, _, _, _, _, _, _),
+ [52] = PINGROUP(52, cci_async, gp_pdm1, _, phase_flag21, qdss_gpio12, _, _, _, _),
+ [53] = PINGROUP(53, cci_async, _, phase_flag22, qdss_gpio13, _, _, _, _, _),
+ [54] = PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup02, mdp_vsync2, _, phase_flag23, qdss_gpio10, _, _, _, _),
+ [57] = PINGROUP(57, qup02, mdp_vsync3, gp_pdm2, _, phase_flag24, qdss_gpio11, _, _, _),
+ [58] = PINGROUP(58, gcc_gp1, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, audio_ref, lpass_ext, mi2s_2, _, phase_flag25, _, _, _, _),
+ [61] = PINGROUP(61, qup01, tgu_ch0, _, phase_flag26, qdss_cti, _, _, _, _),
+ [62] = PINGROUP(62, qup01, tgu_ch1, _, phase_flag27, qdss_cti, _, _, _, _),
+ [63] = PINGROUP(63, qup01, tgu_ch2, _, phase_flag28, qdss_gpio, _, _, _, _),
+ [64] = PINGROUP(64, qup01, tgu_ch3, _, phase_flag29, qdss_gpio, _, _, _, _),
+ [65] = PINGROUP(65, mss_lte, _, qdss_gpio0, _, _, _, _, _, _),
+ [66] = PINGROUP(66, mss_lte, _, qdss_gpio1, _, _, _, _, _, _),
+ [67] = PINGROUP(67, btfm_slimbus, mi2s_1, _, phase_flag30, _, _, _, _, _),
+ [68] = PINGROUP(68, btfm_slimbus, mi2s_1, gp_pdm0, _, phase_flag31, _, _, _, _),
+ [69] = PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, uim2_data, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, uim2_clk, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, uim2_reset, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, uim2_present, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, uim1_data, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, uim1_clk, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, uim1_reset, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, uim1_present, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, atest_usb1, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, _, atest_usb10, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, sd_write, _, atest_usb11, _, _, _, _, _, _),
+ [86] = PINGROUP(86, btfm_slimbus, mi2s_1, _, qdss_cti, atest_usb12, ddr_pxi0, _, _, _),
+ [87] = PINGROUP(87, btfm_slimbus, mi2s_1, adsp_ext, _, qdss_cti, atest_usb13, ddr_pxi1, _,
+ _),
+ [88] = PINGROUP(88, mi2s_0, _, qdss_gpio4, _, atest_usb2, ddr_pxi2, tsense_pwm1,
+ tsense_pwm2, _),
+ [89] = PINGROUP(89, mi2s_0, agera_pll, _, qdss_gpio5, _, vsense_trigger, atest_usb20,
+ ddr_pxi3, _),
+ [90] = PINGROUP(90, mi2s_0, jitter_bist, _, qdss_gpio6, _, wlan1_adc0, atest_usb21,
+ ddr_pxi0, _),
+ [91] = PINGROUP(91, mi2s_0, _, qdss_gpio7, _, wlan2_adc0, atest_usb22, ddr_pxi1, _, _),
+ [92] = PINGROUP(92, _, qdss_gpio8, atest_tsens, wlan1_adc1, atest_usb23, ddr_pxi2, _, _,
+ _),
+ [93] = PINGROUP(93, mclk, lpass_ext, _, qdss_gpio9, atest_tsens2, wlan2_adc1, ddr_pxi3,
+ _, _),
+ [94] = PINGROUP(94, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, ldo_en, _, atest_char, _, _, _, _, _, _),
+ [96] = PINGROUP(96, ldo_update, _, atest_char0, _, _, _, _, _, _),
+ [97] = PINGROUP(97, prng_rosc0, _, atest_char1, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, atest_char2, _, _, prng_rosc1, pll_clk, _, _, _),
+ [99] = PINGROUP(99, _, atest_char3, _, _, prng_rosc2, _, _, _, _),
+ [100] = PINGROUP(100, _, _, prng_rosc3, _, _, _, _, _, _),
+ [101] = PINGROUP(101, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [102] = PINGROUP(102, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [103] = PINGROUP(103, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, qlink0_request, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, qlink0_enable, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qlink1_request, gps_tx, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qlink1_enable, gps_tx, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, _, _, pa_indicator, dp_hot, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb_phy, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, _, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+ [156] = UFS_RESET(ufs_reset, 0x1ae000),
+ [157] = SDC_PINGROUP(sdc1_rclk, 0x1a1000, 0, 0),
+ [158] = SDC_PINGROUP(sdc1_clk, 0x1a0000, 13, 6),
+ [159] = SDC_PINGROUP(sdc1_cmd, 0x1a0000, 11, 3),
+ [160] = SDC_PINGROUP(sdc1_data, 0x1a0000, 9, 0),
+ [161] = SDC_PINGROUP(sdc2_clk, 0x1a2000, 14, 6),
+ [162] = SDC_PINGROUP(sdc2_cmd, 0x1a2000, 11, 3),
+ [163] = SDC_PINGROUP(sdc2_data, 0x1a2000, 9, 0),
+};
+
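+/* Each entry pairs a TLMM GPIO number with its MPM wake interrupt index */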
+static const struct msm_gpio_wakeirq_map sm6375_mpm_map[] = {
+ { 0, 84 }, { 3, 6 }, { 4, 7 }, { 7, 8 }, { 8, 9 }, { 9, 10 }, { 11, 11 }, { 12, 13 },
+ { 13, 14 }, { 16, 16 }, { 17, 17 }, { 18, 18 }, { 19, 19 }, { 21, 20 }, { 22, 21 },
+ { 23, 23 }, { 24, 24 }, { 25, 25 }, { 27, 26 }, { 28, 27 }, { 37, 28 }, { 38, 29 },
+ { 48, 30 }, { 50, 31 }, { 51, 32 }, { 52, 33 }, { 57, 34 }, { 59, 35 }, { 60, 37 },
+ { 61, 38 }, { 62, 39 }, { 64, 40 }, { 66, 41 }, { 67, 42 }, { 68, 43 }, { 69, 44 },
+ { 78, 45 }, { 82, 36 }, { 83, 47 }, { 84, 48 }, { 85, 49 }, { 87, 50 }, { 88, 51 },
+ { 91, 52 }, { 94, 53 }, { 95, 54 }, { 96, 55 }, { 97, 56 }, { 98, 57 }, { 99, 58 },
+ { 100, 59 }, { 104, 60 }, { 107, 61 }, { 118, 62 }, { 124, 63 }, { 125, 64 }, { 126, 65 },
+ { 128, 66 }, { 129, 67 }, { 131, 69 }, { 133, 70 }, { 134, 71 }, { 136, 73 }, { 142, 74 },
+ { 150, 75 }, { 153, 76 }, { 155, 77 },
+};
+
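+/* TLMM: Top Level Mode Multiplexer, the Qualcomm pin controller block */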
+static const struct msm_pinctrl_soc_data sm6375_tlmm = {
+ .pins = sm6375_pins,
+ .npins = ARRAY_SIZE(sm6375_pins),
+ .functions = sm6375_functions,
+ .nfunctions = ARRAY_SIZE(sm6375_functions),
+ .groups = sm6375_groups,
+ .ngroups = ARRAY_SIZE(sm6375_groups),
+ .ngpios = 157,
+ .wakeirq_map = sm6375_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sm6375_mpm_map),
+};
+
+static int sm6375_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm6375_tlmm);
+}
+
+static const struct of_device_id sm6375_tlmm_of_match[] = {
+ { .compatible = "qcom,sm6375-tlmm", },
+ { },
+};
+
+static struct platform_driver sm6375_tlmm_driver = {
+ .driver = {
+ .name = "sm6375-tlmm",
+ .of_match_table = sm6375_tlmm_of_match,
+ },
+ .probe = sm6375_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm6375_tlmm_init(void)
+{
+ return platform_driver_register(&sm6375_tlmm_driver);
+}
+arch_initcall(sm6375_tlmm_init);
+
+static void __exit sm6375_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm6375_tlmm_driver);
+}
+module_exit(sm6375_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM6375 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm6375_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index af144e724bd9..3bd7f9fedcc3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1316,7 +1316,7 @@ static const struct msm_pingroup sm8250_groups[] = {
static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
{ 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
{ 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
- { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 73 },
{ 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
{ 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
{ 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 3be2a08ae3a6..ccaf40a9c0e6 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1159,6 +1159,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmc8180c-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8226-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
@@ -1175,6 +1176,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
+ /* pmp8074 has 12 GPIOs with holes on 1 and 12 */
+ { .compatible = "qcom,pmp8074-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
/* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 961007ce7b3a..0903a0a41831 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -38,7 +38,9 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0
+ select PINCTRL_PFC_R8A779G0 if ARCH_R8A779G0
select PINCTRL_RZG2L if ARCH_RZG2L
+ select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -153,6 +155,10 @@ config PINCTRL_PFC_R8A779A0
bool "pin control support for R-Car V3U" if COMPILE_TEST
	select PINCTRL_SH_PFC

+config PINCTRL_PFC_R8A779G0
+ bool "pin control support for R-Car V4H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
bool "pin control support for R-Mobile A1" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
@@ -237,6 +243,18 @@ config PINCTRL_RZN1
help
	  This selects pinctrl driver for Renesas RZ/N1 devices.

+config PINCTRL_RZV2M
+ bool "pin control support for RZ/V2M"
+ depends on OF
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+	  This selects the GPIO and pinctrl driver for Renesas RZ/V2M
+	  platforms.
+
config PINCTRL_PFC_SH7203
bool "pin control support for SH7203" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 5d936c154a6f..558b30ce0dec 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77990) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_R8A779A0) += pfc-r8a779a0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779F0) += pfc-r8a779f0.o
+obj-$(CONFIG_PINCTRL_PFC_R8A779G0) += pfc-r8a779g0.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
obj-$(CONFIG_PINCTRL_RZG2L) += pinctrl-rzg2l.o
obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
+obj-$(CONFIG_PINCTRL_RZV2M)	+= pinctrl-rzv2m.o

ifeq ($(CONFIG_COMPILE_TEST),y)
CFLAGS_pfc-sh7203.o += -I$(srctree)/arch/sh/include/cpu-sh2a
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 8c14b2021bf0..c91102d3f1d1 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -644,6 +644,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a779f0_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A779G0
+ {
+ .compatible = "renesas,pfc-r8a779g0",
+ .data = &r8a779g0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
index aaca4ee2af55..417c357f16b1 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
@@ -1902,7 +1902,6 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POC0,
POC1,
- POC2,
POC3,
TD0SEL1,
};
@@ -1910,7 +1909,6 @@ enum ioctrl_regs {
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POC0] = { 0xe60500a0, },
[POC1] = { 0xe60508a0, },
- [POC2] = { 0xe60510a0, },
[POC3] = { 0xe60518a0, },
[TD0SEL1] = { 0xe6050920, },
{ /* sentinel */ },
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
new file mode 100644
index 000000000000..5dd1c2c7708a
--- /dev/null
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -0,0 +1,4262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A779G0 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a779a0.c
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
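+/*
+ * Per-bank pin listing: banks 0 and 8, plus the lower pins of banks 1
+ * and 3, additionally support 1.8/3.3 V I/O voltage selection.
+ */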
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_19(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(1, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_20(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_13(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_25(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(7, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_14(8, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33)
+
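+/*
+ * GPSRn_m describes GPIO bank n, pin m: F_() ties the pin's peripheral
+ * function to the IPnSRx bitfield selecting its alternates, while FM()
+ * (used in GPSR4/GPSR5) marks a fixed function with no alternates.
+ */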
+/* GPSR0 */
+#define GPSR0_18 F_(MSIOF2_RXD, IP2SR0_11_8)
+#define GPSR0_17 F_(MSIOF2_SCK, IP2SR0_7_4)
+#define GPSR0_16 F_(MSIOF2_TXD, IP2SR0_3_0)
+#define GPSR0_15 F_(MSIOF2_SYNC, IP1SR0_31_28)
+#define GPSR0_14 F_(MSIOF2_SS1, IP1SR0_27_24)
+#define GPSR0_13 F_(MSIOF2_SS2, IP1SR0_23_20)
+#define GPSR0_12 F_(MSIOF5_RXD, IP1SR0_19_16)
+#define GPSR0_11 F_(MSIOF5_SCK, IP1SR0_15_12)
+#define GPSR0_10 F_(MSIOF5_TXD, IP1SR0_11_8)
+#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+/* GPSR1 */
+#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+#define GPSR1_20 F_(SSI_SD, IP2SR1_19_16)
+#define GPSR1_19 F_(SSI_WS, IP2SR1_15_12)
+#define GPSR1_18 F_(SSI_SCK, IP2SR1_11_8)
+#define GPSR1_17 F_(SCIF_CLK, IP2SR1_7_4)
+#define GPSR1_16 F_(HRX0, IP2SR1_3_0)
+#define GPSR1_15 F_(HSCK0, IP1SR1_31_28)
+#define GPSR1_14 F_(HRTS0_N, IP1SR1_27_24)
+#define GPSR1_13 F_(HCTS0_N, IP1SR1_23_20)
+#define GPSR1_12 F_(HTX0, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_RXD, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SCK, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_TXD, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SYNC, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_SS1, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_SS2, IP0SR1_27_24)
+#define GPSR1_5 F_(MSIOF1_RXD, IP0SR1_23_20)
+#define GPSR1_4 F_(MSIOF1_TXD, IP0SR1_19_16)
+#define GPSR1_3 F_(MSIOF1_SCK, IP0SR1_15_12)
+#define GPSR1_2 F_(MSIOF1_SYNC, IP0SR1_11_8)
+#define GPSR1_1 F_(MSIOF1_SS1, IP0SR1_7_4)
+#define GPSR1_0 F_(MSIOF1_SS2, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_19 F_(CANFD7_RX, IP2SR2_15_12)
+#define GPSR2_18 F_(CANFD7_TX, IP2SR2_11_8)
+#define GPSR2_17 F_(CANFD4_RX, IP2SR2_7_4)
+#define GPSR2_16 F_(CANFD4_TX, IP2SR2_3_0)
+#define GPSR2_15 F_(CANFD3_RX, IP1SR2_31_28)
+#define GPSR2_14 F_(CANFD3_TX, IP1SR2_27_24)
+#define GPSR2_13 F_(CANFD2_RX, IP1SR2_23_20)
+#define GPSR2_12 F_(CANFD2_TX, IP1SR2_19_16)
+#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20)
+#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4)
+#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_29 F_(RPC_INT_N, IP3SR3_23_20)
+#define GPSR3_28 F_(RPC_WP_N, IP3SR3_19_16)
+#define GPSR3_27 F_(RPC_RESET_N, IP3SR3_15_12)
+#define GPSR3_26 F_(QSPI1_IO3, IP3SR3_11_8)
+#define GPSR3_25 F_(QSPI1_SSL, IP3SR3_7_4)
+#define GPSR3_24 F_(QSPI1_IO2, IP3SR3_3_0)
+#define GPSR3_23 F_(QSPI1_MISO_IO1, IP2SR3_31_28)
+#define GPSR3_22 F_(QSPI1_SPCLK, IP2SR3_27_24)
+#define GPSR3_21 F_(QSPI1_MOSI_IO0, IP2SR3_23_20)
+#define GPSR3_20 F_(QSPI0_SPCLK, IP2SR3_19_16)
+#define GPSR3_19 F_(QSPI0_MOSI_IO0, IP2SR3_15_12)
+#define GPSR3_18 F_(QSPI0_MISO_IO1, IP2SR3_11_8)
+#define GPSR3_17 F_(QSPI0_IO2, IP2SR3_7_4)
+#define GPSR3_16 F_(QSPI0_IO3, IP2SR3_3_0)
+#define GPSR3_15 F_(QSPI0_SSL, IP1SR3_31_28)
+#define GPSR3_14 F_(IPC_CLKOUT, IP1SR3_27_24)
+#define GPSR3_13 F_(IPC_CLKIN, IP1SR3_23_20)
+#define GPSR3_12 F_(SD_WP, IP1SR3_19_16)
+#define GPSR3_11 F_(SD_CD, IP1SR3_15_12)
+#define GPSR3_10 F_(MMC_SD_CMD, IP1SR3_11_8)
+#define GPSR3_9 F_(MMC_D6, IP1SR3_7_4)
+#define GPSR3_8 F_(MMC_D7, IP1SR3_3_0)
+#define GPSR3_7 F_(MMC_D4, IP0SR3_31_28)
+#define GPSR3_6 F_(MMC_D5, IP0SR3_27_24)
+#define GPSR3_5 F_(MMC_SD_D3, IP0SR3_23_20)
+#define GPSR3_4 F_(MMC_DS, IP0SR3_19_16)
+#define GPSR3_3 F_(MMC_SD_CLK, IP0SR3_15_12)
+#define GPSR3_2 F_(MMC_SD_D2, IP0SR3_11_8)
+#define GPSR3_1 F_(MMC_SD_D0, IP0SR3_7_4)
+#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)
+
+/* GPSR4 */
+#define GPSR4_24 FM(AVS1)
+#define GPSR4_23 FM(AVS0)
+#define GPSR4_22 FM(PCIE1_CLKREQ_N)
+#define GPSR4_21 FM(PCIE0_CLKREQ_N)
+#define GPSR4_20 FM(TSN0_TXCREFCLK)
+#define GPSR4_19 FM(TSN0_TD2)
+#define GPSR4_18 FM(TSN0_TD3)
+#define GPSR4_17 FM(TSN0_RD2)
+#define GPSR4_16 FM(TSN0_RD3)
+#define GPSR4_15 FM(TSN0_TD0)
+#define GPSR4_14 FM(TSN0_TD1)
+#define GPSR4_13 FM(TSN0_RD1)
+#define GPSR4_12 FM(TSN0_TXC)
+#define GPSR4_11 FM(TSN0_RXC)
+#define GPSR4_10 FM(TSN0_RD0)
+#define GPSR4_9 FM(TSN0_TX_CTL)
+#define GPSR4_8 FM(TSN0_AVTP_PPS0)
+#define GPSR4_7 FM(TSN0_RX_CTL)
+#define GPSR4_6 FM(TSN0_AVTP_CAPTURE)
+#define GPSR4_5 FM(TSN0_AVTP_MATCH)
+#define GPSR4_4 FM(TSN0_LINK)
+#define GPSR4_3 FM(TSN0_PHY_INT)
+#define GPSR4_2 FM(TSN0_AVTP_PPS1)
+#define GPSR4_1 FM(TSN0_MDC)
+#define GPSR4_0 FM(TSN0_MDIO)
+
+/* GPSR5 */
+#define GPSR5_20 FM(AVB2_RX_CTL)
+#define GPSR5_19 FM(AVB2_TX_CTL)
+#define GPSR5_18 FM(AVB2_RXC)
+#define GPSR5_17 FM(AVB2_RD0)
+#define GPSR5_16 FM(AVB2_TXC)
+#define GPSR5_15 FM(AVB2_TD0)
+#define GPSR5_14 FM(AVB2_RD1)
+#define GPSR5_13 FM(AVB2_RD2)
+#define GPSR5_12 FM(AVB2_TD1)
+#define GPSR5_11 FM(AVB2_TD2)
+#define GPSR5_10 FM(AVB2_MDIO)
+#define GPSR5_9 FM(AVB2_RD3)
+#define GPSR5_8 FM(AVB2_TD3)
+#define GPSR5_7 FM(AVB2_TXCREFCLK)
+#define GPSR5_6 FM(AVB2_MDC)
+#define GPSR5_5 FM(AVB2_MAGIC)
+#define GPSR5_4 FM(AVB2_PHY_INT)
+#define GPSR5_3 FM(AVB2_LINK)
+#define GPSR5_2 FM(AVB2_AVTP_MATCH)
+#define GPSR5_1 FM(AVB2_AVTP_CAPTURE)
+#define GPSR5_0 FM(AVB2_AVTP_PPS)
+
+/* GPSR6 */
+#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
+#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
+#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
+#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
+#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
+#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
+#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
+#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
+#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
+#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
+#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
+#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
+#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
+#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
+#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
+#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
+#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
+#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
+#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
+#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
+#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
+
+/* GPSR7 */
+#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
+#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
+#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
+#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
+#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
+#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
+#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
+#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
+#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
+#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
+#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
+#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
+#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
+#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
+#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
+#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
+#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
+#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
+#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
+#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
+#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
+
+/* GPSR8 */
+#define GPSR8_13 F_(GP8_13, IP1SR8_23_20)
+#define GPSR8_12 F_(GP8_12, IP1SR8_19_16)
+#define GPSR8_11 F_(SDA5, IP1SR8_15_12)
+#define GPSR8_10 F_(SCL5, IP1SR8_11_8)
+#define GPSR8_9 F_(SDA4, IP1SR8_7_4)
+#define GPSR8_8 F_(SCL4, IP1SR8_3_0)
+#define GPSR8_7 F_(SDA3, IP0SR8_31_28)
+#define GPSR8_6 F_(SCL3, IP0SR8_27_24)
+#define GPSR8_5 F_(SDA2, IP0SR8_23_20)
+#define GPSR8_4 F_(SCL2, IP0SR8_19_16)
+#define GPSR8_3 F_(SDA1, IP0SR8_15_12)
+#define GPSR8_2 F_(SCL1, IP0SR8_11_8)
+#define GPSR8_1 F_(SDA0, IP0SR8_7_4)
+#define GPSR8_0 F_(SCL0, IP0SR8_3_0)
+
+/* SR0 */
+/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR1 */
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR2 */
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR2_3_0 FM(CANFD4_TX) F_(0, 0) FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_7_4 FM(CANFD4_RX) F_(0, 0) FM(PWM5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_11_8 FM(CANFD7_TX) F_(0, 0) FM(PWM6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(CANFD7_RX) F_(0, 0) FM(PWM7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR3 */
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR6 */
+/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR7 */
+/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR8 */
+/* IP0SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR8_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR8_3_0 FM(SCL4) FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_7_4 FM(SDA4) FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_11_8 FM(SCL5) FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_15_12 FM(SDA5) FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_19_16 F_(0, 0) FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_23_20 F_(0, 0) FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
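+/*
+ * Layout note for the IPnSRm tables above: each 32-bit IPnSRm register packs
+ * up to eight 4-bit fields (the _3_0 .. _31_28 suffixes), and each field
+ * selects one of up to 16 alternate functions (columns 0-F). A minimal,
+ * illustrative decode of one field, assuming this 4-bit-per-pin layout
+ * (the register read and offset name below are hypothetical, not driver API):
+ *
+ *	u32 ipsr = ioread32(pfc_base + IP1SR0_OFFSET);
+ *	unsigned int func = (ipsr >> 20) & 0xf;	// IP1SR0[23:20]
+ *	// func == 0 -> MSIOF2_SS2, 1 -> TCLK1, 2 -> IRQ2_A (see IP1SR0_23_20)
+ */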
+#define PINMUX_GPSR \
+ GPSR3_29 \
+ GPSR1_28 GPSR3_28 \
+ GPSR1_27 GPSR3_27 \
+ GPSR1_26 GPSR3_26 \
+ GPSR1_25 GPSR3_25 \
+ GPSR1_24 GPSR3_24 GPSR4_24 \
+ GPSR1_23 GPSR3_23 GPSR4_23 \
+ GPSR1_22 GPSR3_22 GPSR4_22 \
+ GPSR1_21 GPSR3_21 GPSR4_21 \
+ GPSR1_20 GPSR3_20 GPSR4_20 GPSR5_20 GPSR6_20 GPSR7_20 \
+ GPSR1_19 GPSR2_19 GPSR3_19 GPSR4_19 GPSR5_19 GPSR6_19 GPSR7_19 \
+GPSR0_18 GPSR1_18 GPSR2_18 GPSR3_18 GPSR4_18 GPSR5_18 GPSR6_18 GPSR7_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR3_17 GPSR4_17 GPSR5_17 GPSR6_17 GPSR7_17 \
+GPSR0_16 GPSR1_16 GPSR2_16 GPSR3_16 GPSR4_16 GPSR5_16 GPSR6_16 GPSR7_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 GPSR8_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 GPSR8_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 GPSR8_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 GPSR8_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 GPSR8_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 GPSR8_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 GPSR8_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 GPSR8_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 GPSR8_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 GPSR8_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 GPSR8_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 GPSR8_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 GPSR8_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0 GPSR8_0
+
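+/*
+ * The PINMUX_GPSR table above concatenates one column per GPIO bank
+ * (GP0..GP8) and one row per bit position; a blank cell marks a bit that
+ * the corresponding bank does not implement, which is why the rows have
+ * ragged left edges.
+ */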
+#define PINMUX_IPSR \
+\
+FM(IP0SR0_3_0) IP0SR0_3_0 FM(IP1SR0_3_0) IP1SR0_3_0 FM(IP2SR0_3_0) IP2SR0_3_0 \
+FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 \
+FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \
+FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 \
+FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 \
+FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \
+FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \
+FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \
+\
+FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \
+FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \
+FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \
+FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \
+FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
+FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 \
+FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \
+\
+FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \
+FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
+FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 FM(IP2SR2_11_8) IP2SR2_11_8 \
+FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \
+FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 \
+FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 \
+FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 \
+FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 \
+\
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 FM(IP2SR3_3_0) IP2SR3_3_0 FM(IP3SR3_3_0) IP3SR3_3_0 \
+FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 FM(IP2SR3_7_4) IP2SR3_7_4 FM(IP3SR3_7_4) IP3SR3_7_4 \
+FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 FM(IP2SR3_11_8) IP2SR3_11_8 FM(IP3SR3_11_8) IP3SR3_11_8 \
+FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 FM(IP2SR3_15_12) IP2SR3_15_12 FM(IP3SR3_15_12) IP3SR3_15_12 \
+FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 FM(IP2SR3_19_16) IP2SR3_19_16 FM(IP3SR3_19_16) IP3SR3_19_16 \
+FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2SR3_23_20 FM(IP3SR3_23_20) IP3SR3_23_20 \
+FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 \
+\
+FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \
+FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \
+FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \
+FM(IP0SR6_15_12) IP0SR6_15_12 FM(IP1SR6_15_12) IP1SR6_15_12 FM(IP2SR6_15_12) IP2SR6_15_12 \
+FM(IP0SR6_19_16) IP0SR6_19_16 FM(IP1SR6_19_16) IP1SR6_19_16 FM(IP2SR6_19_16) IP2SR6_19_16 \
+FM(IP0SR6_23_20) IP0SR6_23_20 FM(IP1SR6_23_20) IP1SR6_23_20 \
+FM(IP0SR6_27_24) IP0SR6_27_24 FM(IP1SR6_27_24) IP1SR6_27_24 \
+FM(IP0SR6_31_28) IP0SR6_31_28 FM(IP1SR6_31_28) IP1SR6_31_28 \
+\
+FM(IP0SR7_3_0) IP0SR7_3_0 FM(IP1SR7_3_0) IP1SR7_3_0 FM(IP2SR7_3_0) IP2SR7_3_0 \
+FM(IP0SR7_7_4) IP0SR7_7_4 FM(IP1SR7_7_4) IP1SR7_7_4 FM(IP2SR7_7_4) IP2SR7_7_4 \
+FM(IP0SR7_11_8) IP0SR7_11_8 FM(IP1SR7_11_8) IP1SR7_11_8 FM(IP2SR7_11_8) IP2SR7_11_8 \
+FM(IP0SR7_15_12) IP0SR7_15_12 FM(IP1SR7_15_12) IP1SR7_15_12 FM(IP2SR7_15_12) IP2SR7_15_12 \
+FM(IP0SR7_19_16) IP0SR7_19_16 FM(IP1SR7_19_16) IP1SR7_19_16 FM(IP2SR7_19_16) IP2SR7_19_16 \
+FM(IP0SR7_23_20) IP0SR7_23_20 FM(IP1SR7_23_20) IP1SR7_23_20 \
+FM(IP0SR7_27_24) IP0SR7_27_24 FM(IP1SR7_27_24) IP1SR7_27_24 \
+FM(IP0SR7_31_28) IP0SR7_31_28 FM(IP1SR7_31_28) IP1SR7_31_28 \
+\
+FM(IP0SR8_3_0) IP0SR8_3_0 FM(IP1SR8_3_0) IP1SR8_3_0 \
+FM(IP0SR8_7_4) IP0SR8_7_4 FM(IP1SR8_7_4) IP1SR8_7_4 \
+FM(IP0SR8_11_8) IP0SR8_11_8 FM(IP1SR8_11_8) IP1SR8_11_8 \
+FM(IP0SR8_15_12) IP0SR8_15_12 FM(IP1SR8_15_12) IP1SR8_15_12 \
+FM(IP0SR8_19_16) IP0SR8_19_16 FM(IP1SR8_19_16) IP1SR8_19_16 \
+FM(IP0SR8_23_20) IP0SR8_23_20 FM(IP1SR8_23_20) IP1SR8_23_20 \
+FM(IP0SR8_27_24) IP0SR8_27_24 \
+FM(IP0SR8_31_28) IP0SR8_31_28
+
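+/*
+ * MOD_SELn: each defined bit chooses between two competing functions; the
+ * _0/_1 suffix on a SEL_* name encodes the bit value that routes the signal.
+ * Bits with only a single function behind them are left undefined.
+ */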
+/* MOD_SEL4 */ /* 0 */ /* 1 */
+#define MOD_SEL4_19 FM(SEL_TSN0_TD2_0) FM(SEL_TSN0_TD2_1)
+#define MOD_SEL4_18 FM(SEL_TSN0_TD3_0) FM(SEL_TSN0_TD3_1)
+#define MOD_SEL4_15 FM(SEL_TSN0_TD0_0) FM(SEL_TSN0_TD0_1)
+#define MOD_SEL4_14 FM(SEL_TSN0_TD1_0) FM(SEL_TSN0_TD1_1)
+#define MOD_SEL4_12 FM(SEL_TSN0_TXC_0) FM(SEL_TSN0_TXC_1)
+#define MOD_SEL4_9 FM(SEL_TSN0_TX_CTL_0) FM(SEL_TSN0_TX_CTL_1)
+#define MOD_SEL4_8 FM(SEL_TSN0_AVTP_PPS0_0) FM(SEL_TSN0_AVTP_PPS0_1)
+#define MOD_SEL4_5 FM(SEL_TSN0_AVTP_MATCH_0) FM(SEL_TSN0_AVTP_MATCH_1)
+#define MOD_SEL4_2 FM(SEL_TSN0_AVTP_PPS1_0) FM(SEL_TSN0_AVTP_PPS1_1)
+#define MOD_SEL4_1 FM(SEL_TSN0_MDC_0) FM(SEL_TSN0_MDC_1)
+
+/* MOD_SEL5 */ /* 0 */ /* 1 */
+#define MOD_SEL5_19 FM(SEL_AVB2_TX_CTL_0) FM(SEL_AVB2_TX_CTL_1)
+#define MOD_SEL5_16 FM(SEL_AVB2_TXC_0) FM(SEL_AVB2_TXC_1)
+#define MOD_SEL5_15 FM(SEL_AVB2_TD0_0) FM(SEL_AVB2_TD0_1)
+#define MOD_SEL5_12 FM(SEL_AVB2_TD1_0) FM(SEL_AVB2_TD1_1)
+#define MOD_SEL5_11 FM(SEL_AVB2_TD2_0) FM(SEL_AVB2_TD2_1)
+#define MOD_SEL5_8 FM(SEL_AVB2_TD3_0) FM(SEL_AVB2_TD3_1)
+#define MOD_SEL5_6 FM(SEL_AVB2_MDC_0) FM(SEL_AVB2_MDC_1)
+#define MOD_SEL5_5 FM(SEL_AVB2_MAGIC_0) FM(SEL_AVB2_MAGIC_1)
+#define MOD_SEL5_2 FM(SEL_AVB2_AVTP_MATCH_0) FM(SEL_AVB2_AVTP_MATCH_1)
+#define MOD_SEL5_0 FM(SEL_AVB2_AVTP_PPS_0) FM(SEL_AVB2_AVTP_PPS_1)
+
+/* MOD_SEL6 */ /* 0 */ /* 1 */
+#define MOD_SEL6_18 FM(SEL_AVB1_TD3_0) FM(SEL_AVB1_TD3_1)
+#define MOD_SEL6_16 FM(SEL_AVB1_TD2_0) FM(SEL_AVB1_TD2_1)
+#define MOD_SEL6_13 FM(SEL_AVB1_TD0_0) FM(SEL_AVB1_TD0_1)
+#define MOD_SEL6_12 FM(SEL_AVB1_TD1_0) FM(SEL_AVB1_TD1_1)
+#define MOD_SEL6_10 FM(SEL_AVB1_AVTP_PPS_0) FM(SEL_AVB1_AVTP_PPS_1)
+#define MOD_SEL6_7 FM(SEL_AVB1_TX_CTL_0) FM(SEL_AVB1_TX_CTL_1)
+#define MOD_SEL6_6 FM(SEL_AVB1_TXC_0) FM(SEL_AVB1_TXC_1)
+#define MOD_SEL6_5 FM(SEL_AVB1_AVTP_MATCH_0) FM(SEL_AVB1_AVTP_MATCH_1)
+#define MOD_SEL6_2 FM(SEL_AVB1_MDC_0) FM(SEL_AVB1_MDC_1)
+#define MOD_SEL6_1 FM(SEL_AVB1_MAGIC_0) FM(SEL_AVB1_MAGIC_1)
+
+/* MOD_SEL7 */ /* 0 */ /* 1 */
+#define MOD_SEL7_16 FM(SEL_AVB0_TX_CTL_0) FM(SEL_AVB0_TX_CTL_1)
+#define MOD_SEL7_15 FM(SEL_AVB0_TXC_0) FM(SEL_AVB0_TXC_1)
+#define MOD_SEL7_13 FM(SEL_AVB0_MDC_0) FM(SEL_AVB0_MDC_1)
+#define MOD_SEL7_11 FM(SEL_AVB0_TD0_0) FM(SEL_AVB0_TD0_1)
+#define MOD_SEL7_10 FM(SEL_AVB0_MAGIC_0) FM(SEL_AVB0_MAGIC_1)
+#define MOD_SEL7_7 FM(SEL_AVB0_TD1_0) FM(SEL_AVB0_TD1_1)
+#define MOD_SEL7_6 FM(SEL_AVB0_TD2_0) FM(SEL_AVB0_TD2_1)
+#define MOD_SEL7_3 FM(SEL_AVB0_TD3_0) FM(SEL_AVB0_TD3_1)
+#define MOD_SEL7_2 FM(SEL_AVB0_AVTP_MATCH_0) FM(SEL_AVB0_AVTP_MATCH_1)
+#define MOD_SEL7_0 FM(SEL_AVB0_AVTP_PPS_0) FM(SEL_AVB0_AVTP_PPS_1)
+
+/* MOD_SEL8 */ /* 0 */ /* 1 */
+#define MOD_SEL8_11 FM(SEL_SDA5_0) FM(SEL_SDA5_1)
+#define MOD_SEL8_10 FM(SEL_SCL5_0) FM(SEL_SCL5_1)
+#define MOD_SEL8_9 FM(SEL_SDA4_0) FM(SEL_SDA4_1)
+#define MOD_SEL8_8 FM(SEL_SCL4_0) FM(SEL_SCL4_1)
+#define MOD_SEL8_7 FM(SEL_SDA3_0) FM(SEL_SDA3_1)
+#define MOD_SEL8_6 FM(SEL_SCL3_0) FM(SEL_SCL3_1)
+#define MOD_SEL8_5 FM(SEL_SDA2_0) FM(SEL_SDA2_1)
+#define MOD_SEL8_4 FM(SEL_SCL2_0) FM(SEL_SCL2_1)
+#define MOD_SEL8_3 FM(SEL_SDA1_0) FM(SEL_SDA1_1)
+#define MOD_SEL8_2 FM(SEL_SCL1_0) FM(SEL_SCL1_1)
+#define MOD_SEL8_1 FM(SEL_SDA0_0) FM(SEL_SDA0_1)
+#define MOD_SEL8_0 FM(SEL_SCL0_0) FM(SEL_SCL0_1)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL4_19 MOD_SEL5_19 \
+MOD_SEL4_18 MOD_SEL6_18 \
+ \
+ MOD_SEL5_16 MOD_SEL6_16 MOD_SEL7_16 \
+MOD_SEL4_15 MOD_SEL5_15 MOD_SEL7_15 \
+MOD_SEL4_14 \
+ MOD_SEL6_13 MOD_SEL7_13 \
+MOD_SEL4_12 MOD_SEL5_12 MOD_SEL6_12 \
+ MOD_SEL5_11 MOD_SEL7_11 MOD_SEL8_11 \
+ MOD_SEL6_10 MOD_SEL7_10 MOD_SEL8_10 \
+MOD_SEL4_9 MOD_SEL8_9 \
+MOD_SEL4_8 MOD_SEL5_8 MOD_SEL8_8 \
+ MOD_SEL6_7 MOD_SEL7_7 MOD_SEL8_7 \
+ MOD_SEL5_6 MOD_SEL6_6 MOD_SEL7_6 MOD_SEL8_6 \
+MOD_SEL4_5 MOD_SEL5_5 MOD_SEL6_5 MOD_SEL8_5 \
+ MOD_SEL8_4 \
+ MOD_SEL7_3 MOD_SEL8_3 \
+MOD_SEL4_2 MOD_SEL5_2 MOD_SEL6_2 MOD_SEL7_2 MOD_SEL8_2 \
+MOD_SEL4_1 MOD_SEL6_1 MOD_SEL8_1 \
+ MOD_SEL5_0 MOD_SEL7_0 MOD_SEL8_0
+
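+/*
+ * The enum below is built by two X-macro passes over the PINMUX_* lists:
+ * FM(x) is first defined to emit FN_##x enumerators (the function space),
+ * then redefined to emit x##_MARK enumerators (the mark space), while
+ * F_(x, y) expands to nothing so the per-register bookkeeping is dropped.
+ */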
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
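+/*
+ * A note on the entry types in pinmux_data[] below (a reading of the usual
+ * sh-pfc conventions, not normative): PINMUX_SINGLE() covers pins with
+ * exactly one function, PINMUX_IPSR_GPSR() covers functions selected by an
+ * IPnSRm field alone, PINMUX_IPSR_MSEL() adds a MOD_SELn bit to that
+ * selection, and PINMUX_IPSR_NOGM() appears to tie a function to a MOD_SELn
+ * value for pins that have no IPnSRm field of their own (hence the 0
+ * placeholder argument).
+ */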
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ PINMUX_SINGLE(AVS1),
+ PINMUX_SINGLE(AVS0),
+ PINMUX_SINGLE(PCIE1_CLKREQ_N),
+ PINMUX_SINGLE(PCIE0_CLKREQ_N),
+
+ /* TSN0 without MODSEL4 */
+ PINMUX_SINGLE(TSN0_TXCREFCLK),
+ PINMUX_SINGLE(TSN0_RD2),
+ PINMUX_SINGLE(TSN0_RD3),
+ PINMUX_SINGLE(TSN0_RD1),
+ PINMUX_SINGLE(TSN0_RXC),
+ PINMUX_SINGLE(TSN0_RD0),
+ PINMUX_SINGLE(TSN0_RX_CTL),
+ PINMUX_SINGLE(TSN0_AVTP_CAPTURE),
+ PINMUX_SINGLE(TSN0_LINK),
+ PINMUX_SINGLE(TSN0_PHY_INT),
+ PINMUX_SINGLE(TSN0_MDIO),
+ /* TSN0 with MODSEL4 */
+ PINMUX_IPSR_NOGM(0, TSN0_TD2, SEL_TSN0_TD2_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD3, SEL_TSN0_TD3_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD0, SEL_TSN0_TD0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD1, SEL_TSN0_TD1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TXC, SEL_TSN0_TXC_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TX_CTL, SEL_TSN0_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0, SEL_TSN0_AVTP_PPS0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH, SEL_TSN0_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1, SEL_TSN0_AVTP_PPS1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_MDC, SEL_TSN0_MDC_1),
+
+	/* AVB2 without MODSEL5 */
+ PINMUX_SINGLE(AVB2_RX_CTL),
+ PINMUX_SINGLE(AVB2_RXC),
+ PINMUX_SINGLE(AVB2_RD0),
+ PINMUX_SINGLE(AVB2_RD1),
+ PINMUX_SINGLE(AVB2_RD2),
+ PINMUX_SINGLE(AVB2_MDIO),
+ PINMUX_SINGLE(AVB2_RD3),
+ PINMUX_SINGLE(AVB2_TXCREFCLK),
+ PINMUX_SINGLE(AVB2_PHY_INT),
+ PINMUX_SINGLE(AVB2_LINK),
+ PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+	/* AVB2 with MODSEL5 */
+ PINMUX_IPSR_NOGM(0, AVB2_TX_CTL, SEL_AVB2_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TXC, SEL_AVB2_TXC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD0, SEL_AVB2_TD0_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD1, SEL_AVB2_TD1_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD2, SEL_AVB2_TD2_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD3, SEL_AVB2_TD3_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MDC, SEL_AVB2_MDC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MAGIC, SEL_AVB2_MAGIC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH, SEL_AVB2_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS, SEL_AVB2_AVTP_PPS_1),
+
+ /* IP0SR0 */
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_B),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
+
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
+
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
+
+ PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
+
+ /* IP1SR0 */
+ PINMUX_IPSR_GPSR(IP1SR0_3_0, MSIOF5_SS1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_7_4, MSIOF5_SYNC),
+
+ PINMUX_IPSR_GPSR(IP1SR0_11_8, MSIOF5_TXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_15_12, MSIOF5_SCK),
+
+ PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A),
+
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1),
+
+ /* IP2SR0 */
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, HTX0),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, TX0),
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, RX0),
+
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
+
+ /* IP3SR1 */
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
+
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
+
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
+
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N),
+
+ PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
+
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B),
+
+ /* IP1SR2 */
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
+
+ /* IP2SR2 */
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, CANFD4_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, PWM4),
+
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, CANFD4_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, PWM5),
+
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, CANFD7_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, PWM6),
+
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, CANFD7_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, PWM7),
+
+ /* IP0SR3 */
+ PINMUX_IPSR_GPSR(IP0SR3_3_0, MMC_SD_D1),
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, MMC_SD_D0),
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, MMC_SD_D2),
+ PINMUX_IPSR_GPSR(IP0SR3_15_12, MMC_SD_CLK),
+ PINMUX_IPSR_GPSR(IP0SR3_19_16, MMC_DS),
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, MMC_SD_D3),
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, MMC_D5),
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, MMC_D4),
+
+ /* IP1SR3 */
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, MMC_D7),
+
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, MMC_D6),
+
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, MMC_SD_CMD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, SD_CD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, SD_WP),
+
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_A),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
+
+ /* IP2SR3 */
+ PINMUX_IPSR_GPSR(IP2SR3_3_0, QSPI0_IO3),
+ PINMUX_IPSR_GPSR(IP2SR3_7_4, QSPI0_IO2),
+ PINMUX_IPSR_GPSR(IP2SR3_11_8, QSPI0_MISO_IO1),
+ PINMUX_IPSR_GPSR(IP2SR3_15_12, QSPI0_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_19_16, QSPI0_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_23_20, QSPI1_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_27_24, QSPI1_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_31_28, QSPI1_MISO_IO1),
+
+ /* IP3SR3 */
+ PINMUX_IPSR_GPSR(IP3SR3_3_0, QSPI1_IO2),
+ PINMUX_IPSR_GPSR(IP3SR3_7_4, QSPI1_SSL),
+ PINMUX_IPSR_GPSR(IP3SR3_11_8, QSPI1_IO3),
+ PINMUX_IPSR_GPSR(IP3SR3_15_12, RPC_RESET_N),
+ PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
+ PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),
+
+ /* IP0SR6 */
+ PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),
+
+ PINMUX_IPSR_MSEL(IP0SR6_7_4, AVB1_MAGIC, SEL_AVB1_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP0SR6_11_8, AVB1_MDC, SEL_AVB1_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),
+
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_AVTP_MATCH, SEL_AVB1_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_MII_RX_ER, SEL_AVB1_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_TXC, SEL_AVB1_TXC_1),
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_MII_TXC, SEL_AVB1_TXC_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_TX_CTL, SEL_AVB1_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_MII_TX_EN, SEL_AVB1_TX_CTL_0),
+
+ /* IP1SR6 */
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),
+
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_AVTP_PPS, SEL_AVB1_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_MII_COL, SEL_AVB1_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_TD1, SEL_AVB1_TD1_1),
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_MII_TD1, SEL_AVB1_TD1_0),
+
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_TD0, SEL_AVB1_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_MII_TD0, SEL_AVB1_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_RD0),
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),
+
+ /* IP2SR6 */
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_TD2, SEL_AVB1_TD2_1),
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_MII_TD2, SEL_AVB1_TD2_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_TD3, SEL_AVB1_TD3_1),
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_MII_TD3, SEL_AVB1_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),
+
+ /* IP0SR7 */
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_AVTP_PPS, SEL_AVB0_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_MII_COL, SEL_AVB0_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_AVTP_MATCH, SEL_AVB0_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_MII_RX_ER, SEL_AVB0_AVTP_MATCH_0),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, CC5_OSCOUT, SEL_AVB0_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_TD3, SEL_AVB0_TD3_1),
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_MII_TD3, SEL_AVB0_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),
+
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_TD2, SEL_AVB0_TD2_1),
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_MII_TD2, SEL_AVB0_TD2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_TD1, SEL_AVB0_TD1_1),
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_MII_TD1, SEL_AVB0_TD1_0),
+
+ /* IP1SR7 */
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),
+
+ PINMUX_IPSR_MSEL(IP1SR7_11_8, AVB0_MAGIC, SEL_AVB0_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_TD0, SEL_AVB0_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_MII_TD0, SEL_AVB0_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP1SR7_23_20, AVB0_MDC, SEL_AVB0_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),
+
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_TXC, SEL_AVB0_TXC_1),
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_MII_TXC, SEL_AVB0_TXC_0),
+
+ /* IP2SR7 */
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_TX_CTL, SEL_AVB0_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_MII_TX_EN, SEL_AVB0_TX_CTL_0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_RD0),
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_RXC),
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_RX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_MII_RX_DV),
+
+ /* IP0SR8 */
+ PINMUX_IPSR_MSEL(IP0SR8_3_0, SCL0, SEL_SCL0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_7_4, SDA0, SEL_SDA0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_11_8, SCL1, SEL_SCL1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_15_12, SDA1, SEL_SDA1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_19_16, SCL2, SEL_SCL2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_23_20, SDA2, SEL_SDA2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_27_24, SCL3, SEL_SCL3_0),
+ PINMUX_IPSR_MSEL(IP0SR8_31_28, SDA3, SEL_SDA3_0),
+
+ /* IP1SR8 */
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCL4, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, HRX2, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCK4, SEL_SCL4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, SDA4, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, HTX2, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, CTS4_N, SEL_SDA4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, SCL5, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, HRTS2_N, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, RTS4_N, SEL_SCL5_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SDA5, SEL_SDA5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SCIF_CLK2, SEL_SDA5_0),
+
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, HCTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, TX4),
+
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, HSCK2),
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, RX4),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ * None are defined for this SoC yet; GP_ASSIGN_LAST() anchors the
+ * numbering after the last GPIO pin so future entries slot in behind it.
+ */
+enum {
+	GP_ASSIGN_LAST(),
+};
+
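+/*
+ * One sh_pfc_pin descriptor per GPIO-capable pin; PINMUX_GPIO_GP_ALL()
+ * expands the CPU_ALL_GP() pin list from the shared PFC headers (noted
+ * here as a reading aid; see sh_pfc.h for the exact expansion).
+ */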
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
+/* - AVB0 ------------------------------------------------ */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(7, 4),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int avb0_mdio_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ */
+ RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 3),
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19),
+ RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+ AVB0_TD0_MARK, AVB0_TD1_MARK,
+ AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+ AVB0_RD0_MARK, AVB0_RD1_MARK,
+ AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+ /* AVB0_TXCREFCLK */
+ RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+ /* AVB0_AVTP_PPS */
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+ AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+ /* AVB0_AVTP_CAPTURE */
+ RCAR_GP_PIN(7, 1),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+ AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+ /* AVB0_AVTP_MATCH */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+ AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------ */
+static const unsigned int avb1_link_pins[] = {
+ /* AVB1_LINK */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int avb1_link_mux[] = {
+ AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+ /* AVB1_MAGIC */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int avb1_magic_mux[] = {
+ AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+ /* AVB1_PHY_INT */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+ AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+ /* AVB1_MDC, AVB1_MDIO */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 0),
+};
+static const unsigned int avb1_mdio_mux[] = {
+ AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+ /*
+ * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+ * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+ */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12),
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 18),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+ AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+ AVB1_TD0_MARK, AVB1_TD1_MARK,
+ AVB1_TD2_MARK, AVB1_TD3_MARK,
+ AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+ AVB1_RD0_MARK, AVB1_RD1_MARK,
+ AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+ /* AVB1_TXCREFCLK */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+ AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+ /* AVB1_AVTP_PPS */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+ AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+ /* AVB1_AVTP_CAPTURE */
+ RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+ AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+ /* AVB1_AVTP_MATCH */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+ AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------ */
+static const unsigned int avb2_link_pins[] = {
+ /* AVB2_LINK */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int avb2_link_mux[] = {
+ AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+ /* AVB2_MAGIC */
+ RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb2_magic_mux[] = {
+ AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+ /* AVB2_PHY_INT */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+ AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+ /* AVB2_MDC, AVB2_MDIO */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int avb2_mdio_mux[] = {
+ AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+ /*
+ * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+ * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+ */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 16),
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 12),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 18),
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 9),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+ AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+ AVB2_TD0_MARK, AVB2_TD1_MARK,
+ AVB2_TD2_MARK, AVB2_TD3_MARK,
+ AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+ AVB2_RD0_MARK, AVB2_RD1_MARK,
+ AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+ /* AVB2_TXCREFCLK */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+ AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+ /* AVB2_AVTP_PPS */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+ AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+ /* AVB2_AVTP_CAPTURE */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+ AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+ /* AVB2_AVTP_MATCH */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+ AVB2_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+ /* CANFD0_TX, CANFD0_RX */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int canfd0_data_mux[] = {
+ CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+ /* CANFD1_TX, CANFD1_RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+ /* CANFD2_TX, CANFD2_RX */
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int canfd2_data_mux[] = {
+ CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+ /* CANFD3_TX, CANFD3_RX */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+};
+static const unsigned int canfd3_data_mux[] = {
+ CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD4 ----------------------------------------------------------------- */
+static const unsigned int canfd4_data_pins[] = {
+ /* CANFD4_TX, CANFD4_RX */
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+};
+static const unsigned int canfd4_data_mux[] = {
+ CANFD4_TX_MARK, CANFD4_RX_MARK,
+};
+
+/* - CANFD5 ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_pins[] = {
+ /* CANFD5_TX, CANFD5_RX */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int canfd5_data_mux[] = {
+ CANFD5_TX_MARK, CANFD5_RX_MARK,
+};
+
+/* - CANFD5_B ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_b_pins[] = {
+ /* CANFD5_TX_B, CANFD5_RX_B */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int canfd5_data_b_mux[] = {
+ CANFD5_TX_B_MARK, CANFD5_RX_B_MARK,
+};
+
+/* - CANFD6 ----------------------------------------------------------------- */
+static const unsigned int canfd6_data_pins[] = {
+ /* CANFD6_TX, CANFD6_RX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int canfd6_data_mux[] = {
+ CANFD6_TX_MARK, CANFD6_RX_MARK,
+};
+
+/* - CANFD7 ----------------------------------------------------------------- */
+static const unsigned int canfd7_data_pins[] = {
+ /* CANFD7_TX, CANFD7_RX */
+ RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int canfd7_data_mux[] = {
+ CANFD7_TX_MARK, CANFD7_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* HRX0, HTX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* HSCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* HRTS0_N, HCTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+ /* HRX1, HTX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int hscif1_data_mux[] = {
+ HRX1_MARK, HTX1_MARK,
+};
+static const unsigned int hscif1_clk_pins[] = {
+ /* HSCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int hscif1_clk_mux[] = {
+ HSCK1_MARK,
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+ /* HRTS1_N, HCTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+ HRTS1_N_MARK, HCTS1_N_MARK,
+};
+
+/* - HSCIF1_X ---------------------------------------------------------------- */
+static const unsigned int hscif1_data_x_pins[] = {
+ /* HRX1_X, HTX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int hscif1_data_x_mux[] = {
+ HRX1_X_MARK, HTX1_X_MARK,
+};
+static const unsigned int hscif1_clk_x_pins[] = {
+ /* HSCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int hscif1_clk_x_mux[] = {
+ HSCK1_X_MARK,
+};
+static const unsigned int hscif1_ctrl_x_pins[] = {
+ /* HRTS1_N_X, HCTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int hscif1_ctrl_x_mux[] = {
+ HRTS1_N_X_MARK, HCTS1_N_X_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* HRX2, HTX2 */
+ RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* HSCK2 */
+ RCAR_GP_PIN(8, 13),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* HRTS2_N, HCTS2_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_pins[] = {
+ /* HRX3, HTX3 */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int hscif3_data_mux[] = {
+ HRX3_MARK, HTX3_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* HSCK3 */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* HRTS3_N, HCTS3_N */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+/* - HSCIF3_A ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* HRX3_A, HTX3_A */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_a_pins[] = {
+ /* HSCK3_A */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int hscif3_clk_a_mux[] = {
+ HSCK3_A_MARK,
+};
+static const unsigned int hscif3_ctrl_a_pins[] = {
+ /* HRTS3_N_A, HCTS3_N_A */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int hscif3_ctrl_a_mux[] = {
+ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SDA0, SCL0 */
+ RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 0),
+};
+static const unsigned int i2c0_mux[] = {
+ SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SDA1, SCL1 */
+ RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 2),
+};
+static const unsigned int i2c1_mux[] = {
+ SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SDA2, SCL2 */
+ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 4),
+};
+static const unsigned int i2c2_mux[] = {
+ SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SDA3, SCL3 */
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 6),
+};
+static const unsigned int i2c3_mux[] = {
+ SDA3_MARK, SCL3_MARK,
+};
+
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_pins[] = {
+ /* SDA4, SCL4 */
+ RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 8),
+};
+static const unsigned int i2c4_mux[] = {
+ SDA4_MARK, SCL4_MARK,
+};
+
+/* - I2C5 ------------------------------------------------------------------- */
+static const unsigned int i2c5_pins[] = {
+ /* SDA5, SCL5 */
+ RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 10),
+};
+static const unsigned int i2c5_mux[] = {
+ SDA5_MARK, SCL5_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data_pins[] = {
+ /* MMC_SD_D[0:3], MMC_D[4:7] */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6),
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int mmc_data_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK,
+ MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* MMC_SD_CLK, MMC_SD_CMD */
+ RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+ /* SD_CD */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int mmc_cd_mux[] = {
+ SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+ /* SD_WP */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int mmc_wp_mux[] = {
+ SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+ /* MMC_DS */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int mmc_ds_mux[] = {
+ MMC_DS_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* MSIOF0_SCK */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* MSIOF0_SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* MSIOF0_SS1 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* MSIOF0_SS2 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* MSIOF0_TXD */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* MSIOF0_RXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* MSIOF1_SCK */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* MSIOF1_SYNC */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* MSIOF1_SS1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* MSIOF1_SS2 */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+ /* MSIOF1_TXD */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+ /* MSIOF1_RXD */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* MSIOF2_SCK */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* MSIOF2_SYNC */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* MSIOF2_SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* MSIOF2_SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+ /* MSIOF2_TXD */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+ /* MSIOF2_RXD */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* MSIOF3_SCK */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* MSIOF3_SYNC */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* MSIOF3_SS1 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* MSIOF3_SS2 */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+ /* MSIOF3_TXD */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof3_txd_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+ /* MSIOF3_RXD */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+ /* MSIOF4_SCK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof4_clk_mux[] = {
+ MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+ /* MSIOF4_SYNC */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof4_sync_mux[] = {
+ MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+ /* MSIOF4_SS1 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+ MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+ /* MSIOF4_SS2 */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+ MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+ /* MSIOF4_TXD */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof4_txd_mux[] = {
+ MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+ /* MSIOF4_RXD */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+ MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+ /* MSIOF5_SCK */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof5_clk_mux[] = {
+ MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+ /* MSIOF5_SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof5_sync_mux[] = {
+ MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+ /* MSIOF5_SS1 */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+ MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+ /* MSIOF5_SS2 */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+ MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+ /* MSIOF5_TXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof5_txd_mux[] = {
+ MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+ /* MSIOF5_RXD */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+ MSIOF5_RXD_MARK,
+};
+
+/* - PCIE ------------------------------------------------------------------- */
+static const unsigned int pcie0_clkreq_n_pins[] = {
+ /* PCIE0_CLKREQ_N */
+ RCAR_GP_PIN(4, 21),
+};
+static const unsigned int pcie0_clkreq_n_mux[] = {
+ PCIE0_CLKREQ_N_MARK,
+};
+static const unsigned int pcie1_clkreq_n_pins[] = {
+ /* PCIE1_CLKREQ_N */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int pcie1_clkreq_n_mux[] = {
+ PCIE1_CLKREQ_N_MARK,
+};
+
+/* - PWM0_A ------------------------------------------------------------------- */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM0_A */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+/* - PWM1_A ------------------------------------------------------------------- */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM1_A */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+/* - PWM1_B ------------------------------------------------------------------- */
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM1_B */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+/* - PWM2_B ------------------------------------------------------------------- */
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM2_B */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+/* - PWM3_A ------------------------------------------------------------------- */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM3_A */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+/* - PWM3_B ------------------------------------------------------------------- */
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM3_B */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+ /* PWM4 */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int pwm4_mux[] = {
+ PWM4_MARK,
+};
+
+/* - PWM5 ------------------------------------------------------------------- */
+static const unsigned int pwm5_pins[] = {
+ /* PWM5 */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int pwm5_mux[] = {
+ PWM5_MARK,
+};
+
+/* - PWM6 ------------------------------------------------------------------- */
+static const unsigned int pwm6_pins[] = {
+ /* PWM6 */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int pwm6_mux[] = {
+ PWM6_MARK,
+};
+
+/* - PWM7 ------------------------------------------------------------------- */
+static const unsigned int pwm7_pins[] = {
+ /* PWM7 */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int pwm7_mux[] = {
+ PWM7_MARK,
+};
+
+/* - PWM8_A ------------------------------------------------------------------- */
+static const unsigned int pwm8_a_pins[] = {
+ /* PWM8_A */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int pwm8_a_mux[] = {
+ PWM8_A_MARK,
+};
+
+/* - PWM9_A ------------------------------------------------------------------- */
+static const unsigned int pwm9_a_pins[] = {
+ /* PWM9_A */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int pwm9_a_mux[] = {
+ PWM9_A_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int qspi0_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+	QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi1_data_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+	QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX0, TX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS0_N, CTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX1, TX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS1_N, CTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF1_X ------------------------------------------------------------------ */
+static const unsigned int scif1_data_x_pins[] = {
+ /* RX1_X, TX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif1_data_x_mux[] = {
+ RX1_X_MARK, TX1_X_MARK,
+};
+static const unsigned int scif1_clk_x_pins[] = {
+ /* SCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int scif1_clk_x_mux[] = {
+ SCK1_X_MARK,
+};
+static const unsigned int scif1_ctrl_x_pins[] = {
+ /* RTS1_N_X, CTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_ctrl_x_mux[] = {
+ RTS1_N_X_MARK, CTS1_N_X_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RX3, TX3 */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK3 */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+ /* RTS3_N, CTS3_N */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+ RTS3_N_MARK, CTS3_N_MARK,
+};
+
+/* - SCIF3_A ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX3_A, TX3_A */
+ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK3_A */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS3_N_A, CTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+ RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK4 */
+ RCAR_GP_PIN(8, 8),
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+ /* RTS4_N, CTS4_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+ RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+};
+
+/* - TPU ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+ /* TPU0TO0 */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int tpu_to0_mux[] = {
+ TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+ /* TPU0TO1 */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int tpu_to1_mux[] = {
+ TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+ /* TPU0TO2 */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tpu_to2_mux[] = {
+ TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+ /* TPU0TO3 */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int tpu_to3_mux[] = {
+ TPU0TO3_MARK,
+};
+
+/* - TPU_A ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+ /* TPU0TO0_A */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int tpu_to0_a_mux[] = {
+ TPU0TO0_A_MARK,
+};
+static const unsigned int tpu_to1_a_pins[] = {
+ /* TPU0TO1_A */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int tpu_to1_a_mux[] = {
+ TPU0TO1_A_MARK,
+};
+static const unsigned int tpu_to2_a_pins[] = {
+ /* TPU0TO2_A */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int tpu_to2_a_mux[] = {
+ TPU0TO2_A_MARK,
+};
+static const unsigned int tpu_to3_a_pins[] = {
+ /* TPU0TO3_A */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int tpu_to3_a_mux[] = {
+ TPU0TO3_A_MARK,
+};
+
+/* - TSN0 ------------------------------------------------ */
+static const unsigned int tsn0_link_pins[] = {
+ /* TSN0_LINK */
+ RCAR_GP_PIN(4, 4),
+};
+static const unsigned int tsn0_link_mux[] = {
+ TSN0_LINK_MARK,
+};
+static const unsigned int tsn0_phy_int_pins[] = {
+ /* TSN0_PHY_INT */
+ RCAR_GP_PIN(4, 3),
+};
+static const unsigned int tsn0_phy_int_mux[] = {
+ TSN0_PHY_INT_MARK,
+};
+static const unsigned int tsn0_mdio_pins[] = {
+ /* TSN0_MDC, TSN0_MDIO */
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+};
+static const unsigned int tsn0_mdio_mux[] = {
+ TSN0_MDC_MARK, TSN0_MDIO_MARK,
+};
+static const unsigned int tsn0_rgmii_pins[] = {
+ /*
+ * TSN0_TX_CTL, TSN0_TXC, TSN0_TD0, TSN0_TD1, TSN0_TD2, TSN0_TD3,
+ * TSN0_RX_CTL, TSN0_RXC, TSN0_RD0, TSN0_RD1, TSN0_RD2, TSN0_RD3,
+ */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 11),
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 13),
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int tsn0_rgmii_mux[] = {
+ TSN0_TX_CTL_MARK, TSN0_TXC_MARK,
+ TSN0_TD0_MARK, TSN0_TD1_MARK,
+ TSN0_TD2_MARK, TSN0_TD3_MARK,
+ TSN0_RX_CTL_MARK, TSN0_RXC_MARK,
+ TSN0_RD0_MARK, TSN0_RD1_MARK,
+ TSN0_RD2_MARK, TSN0_RD3_MARK,
+};
+static const unsigned int tsn0_txcrefclk_pins[] = {
+ /* TSN0_TXCREFCLK */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int tsn0_txcrefclk_mux[] = {
+ TSN0_TXCREFCLK_MARK,
+};
+static const unsigned int tsn0_avtp_pps_pins[] = {
+ /* TSN0_AVTP_PPS0, TSN0_AVTP_PPS1 */
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int tsn0_avtp_pps_mux[] = {
+ TSN0_AVTP_PPS0_MARK, TSN0_AVTP_PPS1_MARK,
+};
+static const unsigned int tsn0_avtp_capture_pins[] = {
+ /* TSN0_AVTP_CAPTURE */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int tsn0_avtp_capture_mux[] = {
+ TSN0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int tsn0_avtp_match_pins[] = {
+ /* TSN0_AVTP_MATCH */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int tsn0_avtp_match_mux[] = {
+ TSN0_AVTP_MATCH_MARK,
+};
+
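+/*
+ * Each SH_PFC_PIN_GROUP(x) entry below pairs the x_pins[] and x_mux[]
+ * arrays defined above; the two arrays must list the same signals in
+ * the same order.
+ */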
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_rgmii),
+ SH_PFC_PIN_GROUP(avb0_txcrefclk),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture),
+ SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb1_link),
+ SH_PFC_PIN_GROUP(avb1_magic),
+ SH_PFC_PIN_GROUP(avb1_phy_int),
+ SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_rgmii),
+ SH_PFC_PIN_GROUP(avb1_txcrefclk),
+ SH_PFC_PIN_GROUP(avb1_avtp_pps),
+ SH_PFC_PIN_GROUP(avb1_avtp_capture),
+ SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb2_link),
+ SH_PFC_PIN_GROUP(avb2_magic),
+ SH_PFC_PIN_GROUP(avb2_phy_int),
+ SH_PFC_PIN_GROUP(avb2_mdio),
+ SH_PFC_PIN_GROUP(avb2_rgmii),
+ SH_PFC_PIN_GROUP(avb2_txcrefclk),
+ SH_PFC_PIN_GROUP(avb2_avtp_pps),
+ SH_PFC_PIN_GROUP(avb2_avtp_capture),
+ SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(canfd4_data),
+ SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd6_data),
+ SH_PFC_PIN_GROUP(canfd7_data),
+ SH_PFC_PIN_GROUP(can_clk),
+
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+
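+	/*
+	 * BUS_DATA_PIN_GROUP(mmc_data, n) derives an "mmc_data<n>" group
+	 * from the first n entries of mmc_data_pins[]/mmc_data_mux[].
+	 */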
+ BUS_DATA_PIN_GROUP(mmc_data, 1),
+ BUS_DATA_PIN_GROUP(mmc_data, 4),
+ BUS_DATA_PIN_GROUP(mmc_data, 8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(mmc_cd),
+ SH_PFC_PIN_GROUP(mmc_wp),
+ SH_PFC_PIN_GROUP(mmc_ds),
+
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_txd),
+ SH_PFC_PIN_GROUP(msiof3_rxd),
+
+ SH_PFC_PIN_GROUP(msiof4_clk),
+ SH_PFC_PIN_GROUP(msiof4_sync),
+ SH_PFC_PIN_GROUP(msiof4_ss1),
+ SH_PFC_PIN_GROUP(msiof4_ss2),
+ SH_PFC_PIN_GROUP(msiof4_txd),
+ SH_PFC_PIN_GROUP(msiof4_rxd),
+
+ SH_PFC_PIN_GROUP(msiof5_clk),
+ SH_PFC_PIN_GROUP(msiof5_sync),
+ SH_PFC_PIN_GROUP(msiof5_ss1),
+ SH_PFC_PIN_GROUP(msiof5_ss2),
+ SH_PFC_PIN_GROUP(msiof5_txd),
+ SH_PFC_PIN_GROUP(msiof5_rxd),
+
+ SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+ SH_PFC_PIN_GROUP(pcie1_clkreq_n),
+
+ SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(pwm7),
+ SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ BUS_DATA_PIN_GROUP(qspi0_data, 2),
+ BUS_DATA_PIN_GROUP(qspi0_data, 4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ BUS_DATA_PIN_GROUP(qspi1_data, 2),
+ BUS_DATA_PIN_GROUP(qspi1_data, 4),
+
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
+
+ SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(tsn0_link),
+ SH_PFC_PIN_GROUP(tsn0_phy_int),
+ SH_PFC_PIN_GROUP(tsn0_mdio),
+ SH_PFC_PIN_GROUP(tsn0_rgmii),
+ SH_PFC_PIN_GROUP(tsn0_txcrefclk),
+ SH_PFC_PIN_GROUP(tsn0_avtp_pps),
+ SH_PFC_PIN_GROUP(tsn0_avtp_capture),
+ SH_PFC_PIN_GROUP(tsn0_avtp_match),
+};
+
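+/*
+ * Per-function group name lists; every string must match the name of a
+ * pin group registered in pinmux_groups[] above.
+ */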
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdio",
+ "avb0_rgmii",
+ "avb0_txcrefclk",
+ "avb0_avtp_pps",
+ "avb0_avtp_capture",
+ "avb0_avtp_match",
+};
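+/*
+ * Illustrative device-tree usage (a sketch, not taken from a real board
+ * file; node name and group selection are examples):
+ *
+ *	avb0_pins: avb0 {
+ *		mux {
+ *			groups = "avb0_link", "avb0_mdio", "avb0_rgmii";
+ *			function = "avb0";
+ *		};
+ *	};
+ */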
+
+static const char * const avb1_groups[] = {
+ "avb1_link",
+ "avb1_magic",
+ "avb1_phy_int",
+ "avb1_mdio",
+ "avb1_rgmii",
+ "avb1_txcrefclk",
+ "avb1_avtp_pps",
+ "avb1_avtp_capture",
+ "avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+ "avb2_link",
+ "avb2_magic",
+ "avb2_phy_int",
+ "avb2_mdio",
+ "avb2_rgmii",
+ "avb2_txcrefclk",
+ "avb2_avtp_pps",
+ "avb2_avtp_capture",
+ "avb2_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+ "canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+ "canfd3_data",
+};
+
+static const char * const canfd4_groups[] = {
+ "canfd4_data",
+};
+
+static const char * const canfd5_groups[] = {
+ /* suffix might be updated */
+ "canfd5_data",
+ "canfd5_data_b",
+};
+
+static const char * const canfd6_groups[] = {
+ "canfd6_data",
+};
+
+static const char * const canfd7_groups[] = {
+ "canfd7_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ /* suffix might be updated */
+ "hscif1_data",
+ "hscif1_clk",
+ "hscif1_ctrl",
+ "hscif1_data_x",
+ "hscif1_clk_x",
+ "hscif1_ctrl_x",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+ /* suffix might be updated */
+ "hscif3_data",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_a",
+ "hscif3_clk_a",
+ "hscif3_ctrl_a",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+ "mmc_cd",
+ "mmc_wp",
+ "mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_txd",
+ "msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+ "msiof4_clk",
+ "msiof4_sync",
+ "msiof4_ss1",
+ "msiof4_ss2",
+ "msiof4_txd",
+ "msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+ "msiof5_clk",
+ "msiof5_sync",
+ "msiof5_ss1",
+ "msiof5_ss2",
+ "msiof5_txd",
+ "msiof5_rxd",
+};
+
+static const char * const pcie_groups[] = {
+ "pcie0_clkreq_n",
+ "pcie1_clkreq_n",
+};
+
+static const char * const pwm0_groups[] = {
+ /* suffix might be updated */
+ "pwm0_a",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ /* suffix might be updated */
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6",
+};
+
+static const char * const pwm7_groups[] = {
+ "pwm7",
+};
+
+static const char * const pwm8_groups[] = {
+ /* suffix might be updated */
+ "pwm8_a",
+};
+
+static const char * const pwm9_groups[] = {
+ /* suffix might be updated */
+ "pwm9_a",
+};
+
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ /* suffix might be updated */
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+ "scif1_data_x",
+ "scif1_clk_x",
+ "scif1_ctrl_x",
+};
+
+static const char * const scif3_groups[] = {
+ /* suffix might be updated */
+ "scif3_data",
+ "scif3_clk",
+ "scif3_ctrl",
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk",
+};
+
+static const char * const tpu_groups[] = {
+ /* suffix might be updated */
+ "tpu_to0",
+ "tpu_to0_a",
+ "tpu_to1",
+ "tpu_to1_a",
+ "tpu_to2",
+ "tpu_to2_a",
+ "tpu_to3",
+ "tpu_to3_a",
+};
+
+static const char * const tsn0_groups[] = {
+ "tsn0_link",
+ "tsn0_phy_int",
+ "tsn0_mdio",
+ "tsn0_rgmii",
+ "tsn0_txcrefclk",
+ "tsn0_avtp_pps",
+ "tsn0_avtp_capture",
+ "tsn0_avtp_match",
+};
+
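+/*
+ * SH_PFC_FUNCTION(x) registers pinmux function "x" backed by the
+ * x_groups[] list above.
+ */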
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb0),
+ SH_PFC_FUNCTION(avb1),
+ SH_PFC_FUNCTION(avb2),
+
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(canfd2),
+ SH_PFC_FUNCTION(canfd3),
+ SH_PFC_FUNCTION(canfd4),
+ SH_PFC_FUNCTION(canfd5),
+ SH_PFC_FUNCTION(canfd6),
+ SH_PFC_FUNCTION(canfd7),
+ SH_PFC_FUNCTION(can_clk),
+
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+
+ SH_PFC_FUNCTION(mmc),
+
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(msiof4),
+ SH_PFC_FUNCTION(msiof5),
+
+ SH_PFC_FUNCTION(pcie),
+
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(pwm7),
+ SH_PFC_FUNCTION(pwm8),
+ SH_PFC_FUNCTION(pwm9),
+
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
+
+ SH_PFC_FUNCTION(tpu),
+
+ SH_PFC_FUNCTION(tsn0),
+};
+
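+/*
+ * Register descriptions. F_() and FM() are redefined around each block
+ * so the shared field macros (GPSRn_*, IPnSRm_*) expand to the form
+ * each table needs.
+ */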
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
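+	/* Negative widths in GROUP() denote reserved bit fields to skip. */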
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xE6050040, 32,
+ GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_19 RESERVED */
+ GP_0_18_FN, GPSR0_18,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6050840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xE6058040, 32,
+ GROUP(-12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_20 RESERVED */
+ GP_2_19_FN, GPSR2_19,
+ GP_2_18_FN, GPSR2_18,
+ GP_2_17_FN, GPSR2_17,
+ GP_2_16_FN, GPSR2_16,
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6058840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ GP_3_29_FN, GPSR3_29,
+ GP_3_28_FN, GPSR3_28,
+ GP_3_27_FN, GPSR3_27,
+ GP_3_26_FN, GPSR3_26,
+ GP_3_25_FN, GPSR3_25,
+ GP_3_24_FN, GPSR3_24,
+ GP_3_23_FN, GPSR3_23,
+ GP_3_22_FN, GPSR3_22,
+ GP_3_21_FN, GPSR3_21,
+ GP_3_20_FN, GPSR3_20,
+ GP_3_19_FN, GPSR3_19,
+ GP_3_18_FN, GPSR3_18,
+ GP_3_17_FN, GPSR3_17,
+ GP_3_16_FN, GPSR3_16,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060040, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_24_FN, GPSR4_24,
+ GP_4_23_FN, GPSR4_23,
+ GP_4_22_FN, GPSR4_22,
+ GP_4_21_FN, GPSR4_21,
+ GP_4_20_FN, GPSR4_20,
+ GP_4_19_FN, GPSR4_19,
+ GP_4_18_FN, GPSR4_18,
+ GP_4_17_FN, GPSR4_17,
+ GP_4_16_FN, GPSR4_16,
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xE6061040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_21 RESERVED */
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xE6061840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_21 RESERVED */
+ GP_7_20_FN, GPSR7_20,
+ GP_7_19_FN, GPSR7_19,
+ GP_7_18_FN, GPSR7_18,
+ GP_7_17_FN, GPSR7_17,
+ GP_7_16_FN, GPSR7_16,
+ GP_7_15_FN, GPSR7_15,
+ GP_7_14_FN, GPSR7_14,
+ GP_7_13_FN, GPSR7_13,
+ GP_7_12_FN, GPSR7_12,
+ GP_7_11_FN, GPSR7_11,
+ GP_7_10_FN, GPSR7_10,
+ GP_7_9_FN, GPSR7_9,
+ GP_7_8_FN, GPSR7_8,
+ GP_7_7_FN, GPSR7_7,
+ GP_7_6_FN, GPSR7_6,
+ GP_7_5_FN, GPSR7_5,
+ GP_7_4_FN, GPSR7_4,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR8", 0xE6068040, 32,
+ GROUP(-18, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP8_31_14 RESERVED */
+ GP_8_13_FN, GPSR8_13,
+ GP_8_12_FN, GPSR8_12,
+ GP_8_11_FN, GPSR8_11,
+ GP_8_10_FN, GPSR8_10,
+ GP_8_9_FN, GPSR8_9,
+ GP_8_8_FN, GPSR8_8,
+ GP_8_7_FN, GPSR8_7,
+ GP_8_6_FN, GPSR8_6,
+ GP_8_5_FN, GPSR8_5,
+ GP_8_4_FN, GPSR8_4,
+ GP_8_3_FN, GPSR8_3,
+ GP_8_2_FN, GPSR8_2,
+ GP_8_1_FN, GPSR8_1,
+ GP_8_0_FN, GPSR8_0, ))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IP0SR0", 0xE6050060, 32, 4, GROUP(
+ IP0SR0_31_28
+ IP0SR0_27_24
+ IP0SR0_23_20
+ IP0SR0_19_16
+ IP0SR0_15_12
+ IP0SR0_11_8
+ IP0SR0_7_4
+ IP0SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR0", 0xE6050064, 32, 4, GROUP(
+ IP1SR0_31_28
+ IP1SR0_27_24
+ IP1SR0_23_20
+ IP1SR0_19_16
+ IP1SR0_15_12
+ IP1SR0_11_8
+ IP1SR0_7_4
+ IP1SR0_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR0", 0xE6050068, 32,
+ GROUP(-20, 4, 4, 4),
+ GROUP(
+ /* IP2SR0_31_12 RESERVED */
+ IP2SR0_11_8
+ IP2SR0_7_4
+ IP2SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR1", 0xE6050860, 32, 4, GROUP(
+ IP0SR1_31_28
+ IP0SR1_27_24
+ IP0SR1_23_20
+ IP0SR1_19_16
+ IP0SR1_15_12
+ IP0SR1_11_8
+ IP0SR1_7_4
+ IP0SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR1", 0xE6050864, 32, 4, GROUP(
+ IP1SR1_31_28
+ IP1SR1_27_24
+ IP1SR1_23_20
+ IP1SR1_19_16
+ IP1SR1_15_12
+ IP1SR1_11_8
+ IP1SR1_7_4
+ IP1SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR1", 0xE6050868, 32, 4, GROUP(
+ IP2SR1_31_28
+ IP2SR1_27_24
+ IP2SR1_23_20
+ IP2SR1_19_16
+ IP2SR1_15_12
+ IP2SR1_11_8
+ IP2SR1_7_4
+ IP2SR1_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR1", 0xE605086C, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR1_31_20 RESERVED */
+ IP3SR1_19_16
+ IP3SR1_15_12
+ IP3SR1_11_8
+ IP3SR1_7_4
+ IP3SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR2", 0xE6058060, 32, 4, GROUP(
+ IP0SR2_31_28
+ IP0SR2_27_24
+ IP0SR2_23_20
+ IP0SR2_19_16
+ IP0SR2_15_12
+ IP0SR2_11_8
+ IP0SR2_7_4
+ IP0SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR2", 0xE6058064, 32, 4, GROUP(
+ IP1SR2_31_28
+ IP1SR2_27_24
+ IP1SR2_23_20
+ IP1SR2_19_16
+ IP1SR2_15_12
+ IP1SR2_11_8
+ IP1SR2_7_4
+ IP1SR2_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR2", 0xE6058068, 32,
+ GROUP(-16, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR2_31_16 RESERVED */
+ IP2SR2_15_12
+ IP2SR2_11_8
+ IP2SR2_7_4
+ IP2SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR3", 0xE6058860, 32, 4, GROUP(
+ IP0SR3_31_28
+ IP0SR3_27_24
+ IP0SR3_23_20
+ IP0SR3_19_16
+ IP0SR3_15_12
+ IP0SR3_11_8
+ IP0SR3_7_4
+ IP0SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR3", 0xE6058864, 32, 4, GROUP(
+ IP1SR3_31_28
+ IP1SR3_27_24
+ IP1SR3_23_20
+ IP1SR3_19_16
+ IP1SR3_15_12
+ IP1SR3_11_8
+ IP1SR3_7_4
+ IP1SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR3", 0xE6058868, 32, 4, GROUP(
+ IP2SR3_31_28
+ IP2SR3_27_24
+ IP2SR3_23_20
+ IP2SR3_19_16
+ IP2SR3_15_12
+ IP2SR3_11_8
+ IP2SR3_7_4
+ IP2SR3_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR3", 0xE605886C, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR3_31_24 RESERVED */
+ IP3SR3_23_20
+ IP3SR3_19_16
+ IP3SR3_15_12
+ IP3SR3_11_8
+ IP3SR3_7_4
+ IP3SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ IP0SR6_31_28
+ IP0SR6_27_24
+ IP0SR6_23_20
+ IP0SR6_19_16
+ IP0SR6_15_12
+ IP0SR6_11_8
+ IP0SR6_7_4
+ IP0SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR6", 0xE6061064, 32, 4, GROUP(
+ IP1SR6_31_28
+ IP1SR6_27_24
+ IP1SR6_23_20
+ IP1SR6_19_16
+ IP1SR6_15_12
+ IP1SR6_11_8
+ IP1SR6_7_4
+ IP1SR6_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR6", 0xE6061068, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR6_31_20 RESERVED */
+ IP2SR6_19_16
+ IP2SR6_15_12
+ IP2SR6_11_8
+ IP2SR6_7_4
+ IP2SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR7", 0xE6061860, 32, 4, GROUP(
+ IP0SR7_31_28
+ IP0SR7_27_24
+ IP0SR7_23_20
+ IP0SR7_19_16
+ IP0SR7_15_12
+ IP0SR7_11_8
+ IP0SR7_7_4
+ IP0SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR7", 0xE6061864, 32, 4, GROUP(
+ IP1SR7_31_28
+ IP1SR7_27_24
+ IP1SR7_23_20
+ IP1SR7_19_16
+ IP1SR7_15_12
+ IP1SR7_11_8
+ IP1SR7_7_4
+ IP1SR7_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR7", 0xE6061868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR7_31_20 RESERVED */
+ IP2SR7_19_16
+ IP2SR7_15_12
+ IP2SR7_11_8
+ IP2SR7_7_4
+ IP2SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR8", 0xE6068060, 32, 4, GROUP(
+ IP0SR8_31_28
+ IP0SR8_27_24
+ IP0SR8_23_20
+ IP0SR8_19_16
+ IP0SR8_15_12
+ IP0SR8_11_8
+ IP0SR8_7_4
+ IP0SR8_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP1SR8", 0xE6068064, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP1SR8_31_24 RESERVED */
+ IP1SR8_23_20
+ IP1SR8_19_16
+ IP1SR8_15_12
+ IP1SR8_11_8
+ IP1SR8_7_4
+ IP1SR8_3_0))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+ GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
+ -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL4_19
+ MOD_SEL4_18
+ /* RESERVED 17-16 */
+ MOD_SEL4_15
+ MOD_SEL4_14
+ /* RESERVED 13 */
+ MOD_SEL4_12
+ /* RESERVED 11-10 */
+ MOD_SEL4_9
+ MOD_SEL4_8
+ /* RESERVED 7-6 */
+ MOD_SEL4_5
+ /* RESERVED 4-3 */
+ MOD_SEL4_2
+ MOD_SEL4_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
+ GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
+ 1, 1, -2, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL5_19
+ /* RESERVED 18-17 */
+ MOD_SEL5_16
+ MOD_SEL5_15
+ /* RESERVED 14-13 */
+ MOD_SEL5_12
+ MOD_SEL5_11
+ /* RESERVED 10-9 */
+ MOD_SEL5_8
+ /* RESERVED 7 */
+ MOD_SEL5_6
+ MOD_SEL5_5
+ /* RESERVED 4-3 */
+ MOD_SEL5_2
+ /* RESERVED 1 */
+ MOD_SEL5_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
+ GROUP(-13, 1, -1, 1, -2, 1, 1,
+ -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-19 */
+ MOD_SEL6_18
+ /* RESERVED 17 */
+ MOD_SEL6_16
+ /* RESERVED 15-14 */
+ MOD_SEL6_13
+ MOD_SEL6_12
+ /* RESERVED 11 */
+ MOD_SEL6_10
+ /* RESERVED 9-8 */
+ MOD_SEL6_7
+ MOD_SEL6_6
+ MOD_SEL6_5
+ /* RESERVED 4-3 */
+ MOD_SEL6_2
+ MOD_SEL6_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
+ GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
+ -2, 1, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-17 */
+ MOD_SEL7_16
+ MOD_SEL7_15
+ /* RESERVED 14 */
+ MOD_SEL7_13
+ /* RESERVED 12 */
+ MOD_SEL7_11
+ MOD_SEL7_10
+ /* RESERVED 9-8 */
+ MOD_SEL7_7
+ MOD_SEL7_6
+ /* RESERVED 5-4 */
+ MOD_SEL7_3
+ MOD_SEL7_2
+ /* RESERVED 1 */
+ MOD_SEL7_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
+ GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED 31-12 */
+ MOD_SEL8_11
+ MOD_SEL8_10
+ MOD_SEL8_9
+ MOD_SEL8_8
+ MOD_SEL8_7
+ MOD_SEL8_6
+ MOD_SEL8_5
+ MOD_SEL8_4
+ MOD_SEL8_3
+ MOD_SEL8_2
+ MOD_SEL8_1
+ MOD_SEL8_0))
+ },
+	{ /* sentinel */ },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRV0CTRL0", 0xE6050080) {
+ { RCAR_GP_PIN(0, 7), 28, 3 }, /* MSIOF5_SS2 */
+ { RCAR_GP_PIN(0, 6), 24, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(0, 5), 20, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(0, 4), 16, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(0, 3), 12, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(0, 2), 8, 3 }, /* GP0_02 */
+ { RCAR_GP_PIN(0, 1), 4, 3 }, /* GP0_01 */
+ { RCAR_GP_PIN(0, 0), 0, 3 }, /* GP0_00 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL0", 0xE6050084) {
+ { RCAR_GP_PIN(0, 15), 28, 3 }, /* MSIOF2_SYNC */
+ { RCAR_GP_PIN(0, 14), 24, 3 }, /* MSIOF2_SS1 */
+ { RCAR_GP_PIN(0, 13), 20, 3 }, /* MSIOF2_SS2 */
+ { RCAR_GP_PIN(0, 12), 16, 3 }, /* MSIOF5_RXD */
+ { RCAR_GP_PIN(0, 11), 12, 3 }, /* MSIOF5_SCK */
+ { RCAR_GP_PIN(0, 10), 8, 3 }, /* MSIOF5_TXD */
+ { RCAR_GP_PIN(0, 9), 4, 3 }, /* MSIOF5_SYNC */
+ { RCAR_GP_PIN(0, 8), 0, 3 }, /* MSIOF5_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL0", 0xE6050088) {
+ { RCAR_GP_PIN(0, 18), 8, 3 }, /* MSIOF2_RXD */
+ { RCAR_GP_PIN(0, 17), 4, 3 }, /* MSIOF2_SCK */
+ { RCAR_GP_PIN(0, 16), 0, 3 }, /* MSIOF2_TXD */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL1", 0xE6050880) {
+ { RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(1, 5), 20, 3 }, /* MSIOF1_RXD */
+ { RCAR_GP_PIN(1, 4), 16, 3 }, /* MSIOF1_TXD */
+ { RCAR_GP_PIN(1, 3), 12, 3 }, /* MSIOF1_SCK */
+ { RCAR_GP_PIN(1, 2), 8, 3 }, /* MSIOF1_SYNC */
+ { RCAR_GP_PIN(1, 1), 4, 3 }, /* MSIOF1_SS1 */
+ { RCAR_GP_PIN(1, 0), 0, 3 }, /* MSIOF1_SS2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL1", 0xE6050884) {
+ { RCAR_GP_PIN(1, 15), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(1, 14), 24, 3 }, /* HRTS0_N */
+ { RCAR_GP_PIN(1, 13), 20, 3 }, /* HCTS0_N */
+ { RCAR_GP_PIN(1, 12), 16, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SYNC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL1", 0xE6050888) {
+ { RCAR_GP_PIN(1, 23), 28, 3 }, /* GP1_23 */
+ { RCAR_GP_PIN(1, 22), 24, 3 }, /* AUDIO_CLKIN */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* AUDIO_CLKOUT */
+ { RCAR_GP_PIN(1, 20), 16, 3 }, /* SSI_SD */
+ { RCAR_GP_PIN(1, 19), 12, 3 }, /* SSI_WS */
+ { RCAR_GP_PIN(1, 18), 8, 3 }, /* SSI_SCK */
+ { RCAR_GP_PIN(1, 17), 4, 3 }, /* SCIF_CLK */
+ { RCAR_GP_PIN(1, 16), 0, 3 }, /* HRX0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL1", 0xE605088C) {
+ { RCAR_GP_PIN(1, 28), 16, 3 }, /* HTX3 */
+ { RCAR_GP_PIN(1, 27), 12, 3 }, /* HCTS3_N */
+ { RCAR_GP_PIN(1, 26), 8, 3 }, /* HRTS3_N */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* HSCK3 */
+ { RCAR_GP_PIN(1, 24), 0, 3 }, /* HRX3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL2", 0xE6058080) {
+ { RCAR_GP_PIN(2, 7), 28, 3 }, /* TPU0TO1 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* FXR_TXDB */
+ { RCAR_GP_PIN(2, 5), 20, 3 }, /* FXR_TXENB_N */
+ { RCAR_GP_PIN(2, 4), 16, 3 }, /* RXDB_EXTFXR */
+ { RCAR_GP_PIN(2, 3), 12, 3 }, /* CLK_EXTFXR */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* RXDA_EXTFXR */
+ { RCAR_GP_PIN(2, 1), 4, 3 }, /* FXR_TXENA_N */
+ { RCAR_GP_PIN(2, 0), 0, 3 }, /* FXR_TXDA */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL2", 0xE6058084) {
+ { RCAR_GP_PIN(2, 15), 28, 3 }, /* CANFD3_RX */
+ { RCAR_GP_PIN(2, 14), 24, 3 }, /* CANFD3_TX */
+ { RCAR_GP_PIN(2, 13), 20, 3 }, /* CANFD2_RX */
+ { RCAR_GP_PIN(2, 12), 16, 3 }, /* CANFD2_TX */
+ { RCAR_GP_PIN(2, 11), 12, 3 }, /* CANFD0_RX */
+ { RCAR_GP_PIN(2, 10), 8, 3 }, /* CANFD0_TX */
+ { RCAR_GP_PIN(2, 9), 4, 3 }, /* CAN_CLK */
+ { RCAR_GP_PIN(2, 8), 0, 3 }, /* TPU0TO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL2", 0xE6058088) {
+ { RCAR_GP_PIN(2, 19), 12, 3 }, /* CANFD7_RX */
+ { RCAR_GP_PIN(2, 18), 8, 3 }, /* CANFD7_TX */
+ { RCAR_GP_PIN(2, 17), 4, 3 }, /* CANFD4_RX */
+ { RCAR_GP_PIN(2, 16), 0, 3 }, /* CANFD4_TX */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL3", 0xE6058880) {
+ { RCAR_GP_PIN(3, 7), 28, 3 }, /* MMC_D4 */
+ { RCAR_GP_PIN(3, 6), 24, 3 }, /* MMC_D5 */
+ { RCAR_GP_PIN(3, 5), 20, 3 }, /* MMC_SD_D3 */
+ { RCAR_GP_PIN(3, 4), 16, 3 }, /* MMC_DS */
+ { RCAR_GP_PIN(3, 3), 12, 3 }, /* MMC_SD_CLK */
+ { RCAR_GP_PIN(3, 2), 8, 3 }, /* MMC_SD_D2 */
+ { RCAR_GP_PIN(3, 1), 4, 3 }, /* MMC_SD_D0 */
+ { RCAR_GP_PIN(3, 0), 0, 3 }, /* MMC_SD_D1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL3", 0xE6058884) {
+ { RCAR_GP_PIN(3, 15), 28, 2 }, /* QSPI0_SSL */
+ { RCAR_GP_PIN(3, 14), 24, 2 }, /* IPC_CLKOUT */
+ { RCAR_GP_PIN(3, 13), 20, 2 }, /* IPC_CLKIN */
+ { RCAR_GP_PIN(3, 12), 16, 3 }, /* SD_WP */
+ { RCAR_GP_PIN(3, 11), 12, 3 }, /* SD_CD */
+ { RCAR_GP_PIN(3, 10), 8, 3 }, /* MMC_SD_CMD */
+		{ RCAR_GP_PIN(3, 9), 4, 3 },	/* MMC_D6 */
+ { RCAR_GP_PIN(3, 8), 0, 3 }, /* MMC_D7 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL3", 0xE6058888) {
+ { RCAR_GP_PIN(3, 23), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { RCAR_GP_PIN(3, 22), 24, 2 }, /* QSPI1_SPCLK */
+ { RCAR_GP_PIN(3, 21), 20, 2 }, /* QSPI1_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 20), 16, 2 }, /* QSPI0_SPCLK */
+ { RCAR_GP_PIN(3, 19), 12, 2 }, /* QSPI0_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 18), 8, 2 }, /* QSPI0_MISO_IO1 */
+ { RCAR_GP_PIN(3, 17), 4, 2 }, /* QSPI0_IO2 */
+ { RCAR_GP_PIN(3, 16), 0, 2 }, /* QSPI0_IO3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL3", 0xE605888C) {
+ { RCAR_GP_PIN(3, 29), 20, 2 }, /* RPC_INT_N */
+ { RCAR_GP_PIN(3, 28), 16, 2 }, /* RPC_WP_N */
+ { RCAR_GP_PIN(3, 27), 12, 2 }, /* RPC_RESET_N */
+ { RCAR_GP_PIN(3, 26), 8, 2 }, /* QSPI1_IO3 */
+ { RCAR_GP_PIN(3, 25), 4, 2 }, /* QSPI1_SSL */
+ { RCAR_GP_PIN(3, 24), 0, 2 }, /* QSPI1_IO2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL4", 0xE6060080) {
+ { RCAR_GP_PIN(4, 7), 28, 3 }, /* TSN0_RX_CTL */
+ { RCAR_GP_PIN(4, 6), 24, 3 }, /* TSN0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(4, 5), 20, 3 }, /* TSN0_AVTP_MATCH */
+ { RCAR_GP_PIN(4, 4), 16, 3 }, /* TSN0_LINK */
+ { RCAR_GP_PIN(4, 3), 12, 3 }, /* TSN0_PHY_INT */
+ { RCAR_GP_PIN(4, 2), 8, 3 }, /* TSN0_AVTP_PPS1 */
+ { RCAR_GP_PIN(4, 1), 4, 3 }, /* TSN0_MDC */
+ { RCAR_GP_PIN(4, 0), 0, 3 }, /* TSN0_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL4", 0xE6060084) {
+ { RCAR_GP_PIN(4, 15), 28, 3 }, /* TSN0_TD0 */
+ { RCAR_GP_PIN(4, 14), 24, 3 }, /* TSN0_TD1 */
+ { RCAR_GP_PIN(4, 13), 20, 3 }, /* TSN0_RD1 */
+ { RCAR_GP_PIN(4, 12), 16, 3 }, /* TSN0_TXC */
+ { RCAR_GP_PIN(4, 11), 12, 3 }, /* TSN0_RXC */
+ { RCAR_GP_PIN(4, 10), 8, 3 }, /* TSN0_RD0 */
+ { RCAR_GP_PIN(4, 9), 4, 3 }, /* TSN0_TX_CTL */
+ { RCAR_GP_PIN(4, 8), 0, 3 }, /* TSN0_AVTP_PPS0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL4", 0xE6060088) {
+ { RCAR_GP_PIN(4, 23), 28, 3 }, /* AVS0 */
+ { RCAR_GP_PIN(4, 22), 24, 3 }, /* PCIE1_CLKREQ_N */
+ { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */
+ { RCAR_GP_PIN(4, 20), 16, 3 }, /* TSN0_TXCREFCLK */
+ { RCAR_GP_PIN(4, 19), 12, 3 }, /* TSN0_TD2 */
+ { RCAR_GP_PIN(4, 18), 8, 3 }, /* TSN0_TD3 */
+ { RCAR_GP_PIN(4, 17), 4, 3 }, /* TSN0_RD2 */
+ { RCAR_GP_PIN(4, 16), 0, 3 }, /* TSN0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL4", 0xE606008C) {
+ { RCAR_GP_PIN(4, 24), 0, 3 }, /* AVS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL5", 0xE6060880) {
+ { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB2_TXCREFCLK */
+ { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB2_MDC */
+ { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB2_MAGIC */
+ { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB2_PHY_INT */
+ { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB2_LINK */
+ { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB2_AVTP_MATCH */
+ { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB2_AVTP_CAPTURE */
+ { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB2_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL5", 0xE6060884) {
+ { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB2_TD0 */
+ { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB2_RD1 */
+ { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB2_RD2 */
+ { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB2_TD1 */
+ { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB2_TD2 */
+ { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB2_MDIO */
+ { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB2_RD3 */
+ { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB2_TD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL5", 0xE6060888) {
+ { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB2_RX_CTL */
+ { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB2_TX_CTL */
+ { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB2_RXC */
+ { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB2_RD0 */
+ { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB2_TXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL6", 0xE6061080) {
+ { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB1_TX_CTL */
+ { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB1_TXC */
+ { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB1_AVTP_MATCH */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB1_LINK */
+ { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB1_PHY_INT */
+ { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB1_MDC */
+ { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB1_MAGIC */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB1_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL6", 0xE6061084) {
+ { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB1_RD0 */
+ { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB1_RD1 */
+ { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB1_TD0 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB1_TD1 */
+ { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB1_AVTP_CAPTURE */
+ { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB1_AVTP_PPS */
+ { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB1_RX_CTL */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB1_RXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL6", 0xE6061088) {
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB1_TXCREFCLK */
+ { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB1_RD3 */
+ { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB1_TD3 */
+ { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB1_RD2 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB1_TD2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL7", 0xE6061880) {
+ { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB0_TD1 */
+ { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB0_TD2 */
+ { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB0_PHY_INT */
+ { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB0_LINK */
+ { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB0_TD3 */
+ { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB0_AVTP_MATCH */
+ { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB0_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL7", 0xE6061884) {
+ { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB0_TXC */
+ { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB0_MDIO */
+ { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB0_MDC */
+ { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB0_RD2 */
+ { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB0_TD0 */
+ { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB0_MAGIC */
+ { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB0_TXCREFCLK */
+ { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL7", 0xE6061888) {
+ { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB0_RX_CTL */
+ { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB0_RXC */
+ { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB0_RD0 */
+ { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB0_RD1 */
+ { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB0_TX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL8", 0xE6068080) {
+ { RCAR_GP_PIN(8, 7), 28, 3 }, /* SDA3 */
+ { RCAR_GP_PIN(8, 6), 24, 3 }, /* SCL3 */
+ { RCAR_GP_PIN(8, 5), 20, 3 }, /* SDA2 */
+ { RCAR_GP_PIN(8, 4), 16, 3 }, /* SCL2 */
+ { RCAR_GP_PIN(8, 3), 12, 3 }, /* SDA1 */
+ { RCAR_GP_PIN(8, 2), 8, 3 }, /* SCL1 */
+ { RCAR_GP_PIN(8, 1), 4, 3 }, /* SDA0 */
+ { RCAR_GP_PIN(8, 0), 0, 3 }, /* SCL0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL8", 0xE6068084) {
+ { RCAR_GP_PIN(8, 13), 20, 3 }, /* GP8_13 */
+ { RCAR_GP_PIN(8, 12), 16, 3 }, /* GP8_12 */
+ { RCAR_GP_PIN(8, 11), 12, 3 }, /* SDA5 */
+ { RCAR_GP_PIN(8, 10), 8, 3 }, /* SCL5 */
+ { RCAR_GP_PIN(8, 9), 4, 3 }, /* SDA4 */
+ { RCAR_GP_PIN(8, 8), 0, 3 }, /* SCL4 */
+ } },
+	{ /* sentinel */ },
+};
+
+enum ioctrl_regs {
+ POC0,
+ POC1,
+ POC3,
+ POC4,
+ POC5,
+ POC6,
+ POC7,
+ POC8,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POC0] = { 0xE60500A0, },
+ [POC1] = { 0xE60508A0, },
+ [POC3] = { 0xE60588A0, },
+ [POC4] = { 0xE60600A0, },
+ [POC5] = { 0xE60608A0, },
+ [POC6] = { 0xE60610A0, },
+ [POC7] = { 0xE60618A0, },
+ [POC8] = { 0xE60680A0, },
+ { /* sentinel */ },
+};
+
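+/*
+ * For example, RCAR_GP_PIN(1, 5) (GPIO bank 1, pin 5) maps to the POC1
+ * register at 0xE60508A0, bit 5; pins outside the GP0/GP1/GP3/GP8 ranges
+ * handled below have no POC control and yield -EINVAL.
+ */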
+static int r8a779g0_pin_to_pocctrl(unsigned int pin, u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
+
+ *pocctrl = pinmux_ioctrl_regs[POC0].reg;
+ if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 18))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC1].reg;
+ if (pin >= RCAR_GP_PIN(1, 0) && pin <= RCAR_GP_PIN(1, 22))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC3].reg;
+ if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 12))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC8].reg;
+ if (pin >= RCAR_GP_PIN(8, 0) && pin <= RCAR_GP_PIN(8, 13))
+ return bit;
+
+ return -EINVAL;
+}
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xE60500C0, "PUD0", 0xE60500E0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* GP0_00 */
+ [ 1] = RCAR_GP_PIN(0, 1), /* GP0_01 */
+ [ 2] = RCAR_GP_PIN(0, 2), /* GP0_02 */
+ [ 3] = RCAR_GP_PIN(0, 3), /* IRQ3 */
+ [ 4] = RCAR_GP_PIN(0, 4), /* IRQ2 */
+ [ 5] = RCAR_GP_PIN(0, 5), /* IRQ1 */
+ [ 6] = RCAR_GP_PIN(0, 6), /* IRQ0 */
+ [ 7] = RCAR_GP_PIN(0, 7), /* MSIOF5_SS2 */
+ [ 8] = RCAR_GP_PIN(0, 8), /* MSIOF5_SS1 */
+ [ 9] = RCAR_GP_PIN(0, 9), /* MSIOF5_SYNC */
+ [10] = RCAR_GP_PIN(0, 10), /* MSIOF5_TXD */
+ [11] = RCAR_GP_PIN(0, 11), /* MSIOF5_SCK */
+ [12] = RCAR_GP_PIN(0, 12), /* MSIOF5_RXD */
+ [13] = RCAR_GP_PIN(0, 13), /* MSIOF2_SS2 */
+ [14] = RCAR_GP_PIN(0, 14), /* MSIOF2_SS1 */
+ [15] = RCAR_GP_PIN(0, 15), /* MSIOF2_SYNC */
+ [16] = RCAR_GP_PIN(0, 16), /* MSIOF2_TXD */
+ [17] = RCAR_GP_PIN(0, 17), /* MSIOF2_SCK */
+ [18] = RCAR_GP_PIN(0, 18), /* MSIOF2_RXD */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xE60508C0, "PUD1", 0xE60508E0) {
+ [ 0] = RCAR_GP_PIN(1, 0), /* MSIOF1_SS2 */
+ [ 1] = RCAR_GP_PIN(1, 1), /* MSIOF1_SS1 */
+ [ 2] = RCAR_GP_PIN(1, 2), /* MSIOF1_SYNC */
+ [ 3] = RCAR_GP_PIN(1, 3), /* MSIOF1_SCK */
+ [ 4] = RCAR_GP_PIN(1, 4), /* MSIOF1_TXD */
+ [ 5] = RCAR_GP_PIN(1, 5), /* MSIOF1_RXD */
+ [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_SS2 */
+ [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_SS1 */
+ [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SYNC */
+ [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_TXD */
+ [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SCK */
+ [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_RXD */
+ [12] = RCAR_GP_PIN(1, 12), /* HTX0 */
+ [13] = RCAR_GP_PIN(1, 13), /* HCTS0_N */
+ [14] = RCAR_GP_PIN(1, 14), /* HRTS0_N */
+ [15] = RCAR_GP_PIN(1, 15), /* HSCK0 */
+ [16] = RCAR_GP_PIN(1, 16), /* HRX0 */
+ [17] = RCAR_GP_PIN(1, 17), /* SCIF_CLK */
+ [18] = RCAR_GP_PIN(1, 18), /* SSI_SCK */
+ [19] = RCAR_GP_PIN(1, 19), /* SSI_WS */
+ [20] = RCAR_GP_PIN(1, 20), /* SSI_SD */
+ [21] = RCAR_GP_PIN(1, 21), /* AUDIO_CLKOUT */
+ [22] = RCAR_GP_PIN(1, 22), /* AUDIO_CLKIN */
+ [23] = RCAR_GP_PIN(1, 23), /* GP1_23 */
+ [24] = RCAR_GP_PIN(1, 24), /* HRX3 */
+ [25] = RCAR_GP_PIN(1, 25), /* HSCK3 */
+ [26] = RCAR_GP_PIN(1, 26), /* HRTS3_N */
+ [27] = RCAR_GP_PIN(1, 27), /* HCTS3_N */
+ [28] = RCAR_GP_PIN(1, 28), /* HTX3 */
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xE60580C0, "PUD2", 0xE60580E0) {
+ [ 0] = RCAR_GP_PIN(2, 0), /* FXR_TXDA */
+ [ 1] = RCAR_GP_PIN(2, 1), /* FXR_TXENA_N */
+ [ 2] = RCAR_GP_PIN(2, 2), /* RXDA_EXTFXR */
+ [ 3] = RCAR_GP_PIN(2, 3), /* CLK_EXTFXR */
+ [ 4] = RCAR_GP_PIN(2, 4), /* RXDB_EXTFXR */
+ [ 5] = RCAR_GP_PIN(2, 5), /* FXR_TXENB_N */
+ [ 6] = RCAR_GP_PIN(2, 6), /* FXR_TXDB */
+ [ 7] = RCAR_GP_PIN(2, 7), /* TPU0TO1 */
+ [ 8] = RCAR_GP_PIN(2, 8), /* TPU0TO0 */
+ [ 9] = RCAR_GP_PIN(2, 9), /* CAN_CLK */
+ [10] = RCAR_GP_PIN(2, 10), /* CANFD0_TX */
+ [11] = RCAR_GP_PIN(2, 11), /* CANFD0_RX */
+ [12] = RCAR_GP_PIN(2, 12), /* CANFD2_TX */
+ [13] = RCAR_GP_PIN(2, 13), /* CANFD2_RX */
+ [14] = RCAR_GP_PIN(2, 14), /* CANFD3_TX */
+ [15] = RCAR_GP_PIN(2, 15), /* CANFD3_RX */
+ [16] = RCAR_GP_PIN(2, 16), /* CANFD4_TX */
+ [17] = RCAR_GP_PIN(2, 17), /* CANFD4_RX */
+ [18] = RCAR_GP_PIN(2, 18), /* CANFD7_TX */
+ [19] = RCAR_GP_PIN(2, 19), /* CANFD7_RX */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xE60588C0, "PUD3", 0xE60588E0) {
+ [ 0] = RCAR_GP_PIN(3, 0), /* MMC_SD_D1 */
+ [ 1] = RCAR_GP_PIN(3, 1), /* MMC_SD_D0 */
+ [ 2] = RCAR_GP_PIN(3, 2), /* MMC_SD_D2 */
+ [ 3] = RCAR_GP_PIN(3, 3), /* MMC_SD_CLK */
+ [ 4] = RCAR_GP_PIN(3, 4), /* MMC_DS */
+ [ 5] = RCAR_GP_PIN(3, 5), /* MMC_SD_D3 */
+ [ 6] = RCAR_GP_PIN(3, 6), /* MMC_D5 */
+ [ 7] = RCAR_GP_PIN(3, 7), /* MMC_D4 */
+ [ 8] = RCAR_GP_PIN(3, 8), /* MMC_D7 */
+ [ 9] = RCAR_GP_PIN(3, 9), /* MMC_D6 */
+ [10] = RCAR_GP_PIN(3, 10), /* MMC_SD_CMD */
+ [11] = RCAR_GP_PIN(3, 11), /* SD_CD */
+ [12] = RCAR_GP_PIN(3, 12), /* SD_WP */
+ [13] = RCAR_GP_PIN(3, 13), /* IPC_CLKIN */
+ [14] = RCAR_GP_PIN(3, 14), /* IPC_CLKOUT */
+ [15] = RCAR_GP_PIN(3, 15), /* QSPI0_SSL */
+ [16] = RCAR_GP_PIN(3, 16), /* QSPI0_IO3 */
+ [17] = RCAR_GP_PIN(3, 17), /* QSPI0_IO2 */
+ [18] = RCAR_GP_PIN(3, 18), /* QSPI0_MISO_IO1 */
+ [19] = RCAR_GP_PIN(3, 19), /* QSPI0_MOSI_IO0 */
+ [20] = RCAR_GP_PIN(3, 20), /* QSPI0_SPCLK */
+ [21] = RCAR_GP_PIN(3, 21), /* QSPI1_MOSI_IO0 */
+ [22] = RCAR_GP_PIN(3, 22), /* QSPI1_SPCLK */
+ [23] = RCAR_GP_PIN(3, 23), /* QSPI1_MISO_IO1 */
+ [24] = RCAR_GP_PIN(3, 24), /* QSPI1_IO2 */
+ [25] = RCAR_GP_PIN(3, 25), /* QSPI1_SSL */
+ [26] = RCAR_GP_PIN(3, 26), /* QSPI1_IO3 */
+ [27] = RCAR_GP_PIN(3, 27), /* RPC_RESET_N */
+ [28] = RCAR_GP_PIN(3, 28), /* RPC_WP_N */
+ [29] = RCAR_GP_PIN(3, 29), /* RPC_INT_N */
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xE60600C0, "PUD4", 0xE60600E0) {
+ [ 0] = RCAR_GP_PIN(4, 0), /* TSN0_MDIO */
+ [ 1] = RCAR_GP_PIN(4, 1), /* TSN0_MDC */
+ [ 2] = RCAR_GP_PIN(4, 2), /* TSN0_AVTP_PPS1 */
+ [ 3] = RCAR_GP_PIN(4, 3), /* TSN0_PHY_INT */
+ [ 4] = RCAR_GP_PIN(4, 4), /* TSN0_LINK */
+ [ 5] = RCAR_GP_PIN(4, 5), /* TSN0_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(4, 6), /* TSN0_AVTP_CAPTURE */
+ [ 7] = RCAR_GP_PIN(4, 7), /* TSN0_RX_CTL */
+ [ 8] = RCAR_GP_PIN(4, 8), /* TSN0_AVTP_PPS0 */
+ [ 9] = RCAR_GP_PIN(4, 9), /* TSN0_TX_CTL */
+ [10] = RCAR_GP_PIN(4, 10), /* TSN0_RD0 */
+ [11] = RCAR_GP_PIN(4, 11), /* TSN0_RXC */
+ [12] = RCAR_GP_PIN(4, 12), /* TSN0_TXC */
+ [13] = RCAR_GP_PIN(4, 13), /* TSN0_RD1 */
+ [14] = RCAR_GP_PIN(4, 14), /* TSN0_TD1 */
+ [15] = RCAR_GP_PIN(4, 15), /* TSN0_TD0 */
+ [16] = RCAR_GP_PIN(4, 16), /* TSN0_RD3 */
+ [17] = RCAR_GP_PIN(4, 17), /* TSN0_RD2 */
+ [18] = RCAR_GP_PIN(4, 18), /* TSN0_TD3 */
+ [19] = RCAR_GP_PIN(4, 19), /* TSN0_TD2 */
+ [20] = RCAR_GP_PIN(4, 20), /* TSN0_TXCREFCLK */
+ [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */
+ [22] = RCAR_GP_PIN(4, 22), /* PCIE1_CLKREQ_N */
+ [23] = RCAR_GP_PIN(4, 23), /* AVS0 */
+ [24] = RCAR_GP_PIN(4, 24), /* AVS1 */
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xE60608C0, "PUD5", 0xE60608E0) {
+ [ 0] = RCAR_GP_PIN(5, 0), /* AVB2_AVTP_PPS */
+		[ 1] = RCAR_GP_PIN(5, 1),	/* AVB2_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(5, 2), /* AVB2_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(5, 3), /* AVB2_LINK */
+ [ 4] = RCAR_GP_PIN(5, 4), /* AVB2_PHY_INT */
+ [ 5] = RCAR_GP_PIN(5, 5), /* AVB2_MAGIC */
+ [ 6] = RCAR_GP_PIN(5, 6), /* AVB2_MDC */
+ [ 7] = RCAR_GP_PIN(5, 7), /* AVB2_TXCREFCLK */
+ [ 8] = RCAR_GP_PIN(5, 8), /* AVB2_TD3 */
+ [ 9] = RCAR_GP_PIN(5, 9), /* AVB2_RD3 */
+ [10] = RCAR_GP_PIN(5, 10), /* AVB2_MDIO */
+ [11] = RCAR_GP_PIN(5, 11), /* AVB2_TD2 */
+ [12] = RCAR_GP_PIN(5, 12), /* AVB2_TD1 */
+ [13] = RCAR_GP_PIN(5, 13), /* AVB2_RD2 */
+ [14] = RCAR_GP_PIN(5, 14), /* AVB2_RD1 */
+ [15] = RCAR_GP_PIN(5, 15), /* AVB2_TD0 */
+ [16] = RCAR_GP_PIN(5, 16), /* AVB2_TXC */
+ [17] = RCAR_GP_PIN(5, 17), /* AVB2_RD0 */
+ [18] = RCAR_GP_PIN(5, 18), /* AVB2_RXC */
+ [19] = RCAR_GP_PIN(5, 19), /* AVB2_TX_CTL */
+ [20] = RCAR_GP_PIN(5, 20), /* AVB2_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xE60610C0, "PUD6", 0xE60610E0) {
+ [ 0] = RCAR_GP_PIN(6, 0), /* AVB1_MDIO */
+ [ 1] = RCAR_GP_PIN(6, 1), /* AVB1_MAGIC */
+ [ 2] = RCAR_GP_PIN(6, 2), /* AVB1_MDC */
+ [ 3] = RCAR_GP_PIN(6, 3), /* AVB1_PHY_INT */
+ [ 4] = RCAR_GP_PIN(6, 4), /* AVB1_LINK */
+ [ 5] = RCAR_GP_PIN(6, 5), /* AVB1_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(6, 6), /* AVB1_TXC */
+ [ 7] = RCAR_GP_PIN(6, 7), /* AVB1_TX_CTL */
+ [ 8] = RCAR_GP_PIN(6, 8), /* AVB1_RXC */
+ [ 9] = RCAR_GP_PIN(6, 9), /* AVB1_RX_CTL */
+ [10] = RCAR_GP_PIN(6, 10), /* AVB1_AVTP_PPS */
+ [11] = RCAR_GP_PIN(6, 11), /* AVB1_AVTP_CAPTURE */
+ [12] = RCAR_GP_PIN(6, 12), /* AVB1_TD1 */
+ [13] = RCAR_GP_PIN(6, 13), /* AVB1_TD0 */
+		[14] = RCAR_GP_PIN(6, 14),	/* AVB1_RD1 */
+ [15] = RCAR_GP_PIN(6, 15), /* AVB1_RD0 */
+ [16] = RCAR_GP_PIN(6, 16), /* AVB1_TD2 */
+ [17] = RCAR_GP_PIN(6, 17), /* AVB1_RD2 */
+ [18] = RCAR_GP_PIN(6, 18), /* AVB1_TD3 */
+ [19] = RCAR_GP_PIN(6, 19), /* AVB1_RD3 */
+ [20] = RCAR_GP_PIN(6, 20), /* AVB1_TXCREFCLK */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN7", 0xE60618C0, "PUD7", 0xE60618E0) {
+ [ 0] = RCAR_GP_PIN(7, 0), /* AVB0_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(7, 1), /* AVB0_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(7, 2), /* AVB0_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(7, 3), /* AVB0_TD3 */
+ [ 4] = RCAR_GP_PIN(7, 4), /* AVB0_LINK */
+ [ 5] = RCAR_GP_PIN(7, 5), /* AVB0_PHY_INT */
+ [ 6] = RCAR_GP_PIN(7, 6), /* AVB0_TD2 */
+ [ 7] = RCAR_GP_PIN(7, 7), /* AVB0_TD1 */
+ [ 8] = RCAR_GP_PIN(7, 8), /* AVB0_RD3 */
+ [ 9] = RCAR_GP_PIN(7, 9), /* AVB0_TXCREFCLK */
+ [10] = RCAR_GP_PIN(7, 10), /* AVB0_MAGIC */
+ [11] = RCAR_GP_PIN(7, 11), /* AVB0_TD0 */
+ [12] = RCAR_GP_PIN(7, 12), /* AVB0_RD2 */
+ [13] = RCAR_GP_PIN(7, 13), /* AVB0_MDC */
+ [14] = RCAR_GP_PIN(7, 14), /* AVB0_MDIO */
+ [15] = RCAR_GP_PIN(7, 15), /* AVB0_TXC */
+ [16] = RCAR_GP_PIN(7, 16), /* AVB0_TX_CTL */
+ [17] = RCAR_GP_PIN(7, 17), /* AVB0_RD1 */
+ [18] = RCAR_GP_PIN(7, 18), /* AVB0_RD0 */
+ [19] = RCAR_GP_PIN(7, 19), /* AVB0_RXC */
+ [20] = RCAR_GP_PIN(7, 20), /* AVB0_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN8", 0xE60680C0, "PUD8", 0xE60680E0) {
+ [ 0] = RCAR_GP_PIN(8, 0), /* SCL0 */
+ [ 1] = RCAR_GP_PIN(8, 1), /* SDA0 */
+ [ 2] = RCAR_GP_PIN(8, 2), /* SCL1 */
+ [ 3] = RCAR_GP_PIN(8, 3), /* SDA1 */
+ [ 4] = RCAR_GP_PIN(8, 4), /* SCL2 */
+ [ 5] = RCAR_GP_PIN(8, 5), /* SDA2 */
+ [ 6] = RCAR_GP_PIN(8, 6), /* SCL3 */
+ [ 7] = RCAR_GP_PIN(8, 7), /* SDA3 */
+ [ 8] = RCAR_GP_PIN(8, 8), /* SCL4 */
+ [ 9] = RCAR_GP_PIN(8, 9), /* SDA4 */
+ [10] = RCAR_GP_PIN(8, 10), /* SCL5 */
+ [11] = RCAR_GP_PIN(8, 11), /* SDA5 */
+ [12] = RCAR_GP_PIN(8, 12), /* GP8_12 */
+ [13] = RCAR_GP_PIN(8, 13), /* GP8_13 */
+ [14] = SH_PFC_PIN_NONE,
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
+static const struct sh_pfc_soc_operations r8a779g0_pin_ops = {
+ .pin_to_pocctrl = r8a779g0_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a779g0_pinmux_info = {
+ .name = "r8a779g0_pfc",
+ .ops = &r8a779g0_pin_ops,
+ .unlock_reg = 0x1ff, /* PMMRn mask */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c47eed9d948f..a43824fd9505 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -527,6 +527,8 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
arg = rzg2l_read_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK);
+ if (!arg)
+ return -EINVAL;
break;
case PIN_CONFIG_POWER_SOURCE: {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
new file mode 100644
index 000000000000..e8c18198bebd
--- /dev/null
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2M Pin Control and GPIO driver core
+ *
+ * Based on:
+ * Renesas RZ/G2L Pin Control and GPIO driver core
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/pinctrl/rzv2m-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "pinctrl-rzv2m"
+
+/*
+ * Use 16 lower bits [15:0] for pin identifier
+ * Use 16 higher bits [31:16] for pin mux function
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(31, 16)
+#define MUX_FUNC(pinconf) FIELD_GET(MUX_FUNC_MASK, (pinconf))
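+
+/*
+ * Worked example (illustrative only): with the 16-pins-per-port numbering
+ * used below, a "pinmux" cell value of 0x60015 carries pin identifier
+ * 0x15 (port 1, pin 5, i.e. 1 * 16 + 5) in its lower bits and pin mux
+ * function 6 in its upper bits, so MUX_FUNC(0x60015) == 6.
+ */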
+
+/* PIN capabilities */
+#define PIN_CFG_GRP_1_8V_2 1
+#define PIN_CFG_GRP_1_8V_3 2
+#define PIN_CFG_GRP_SWIO_1 3
+#define PIN_CFG_GRP_SWIO_2 4
+#define PIN_CFG_GRP_3_3V 5
+#define PIN_CFG_GRP_MASK GENMASK(2, 0)
+#define PIN_CFG_BIAS BIT(3)
+#define PIN_CFG_DRV BIT(4)
+#define PIN_CFG_SLEW BIT(5)
+
+#define RZV2M_MPXED_PIN_FUNCS (PIN_CFG_BIAS | \
+ PIN_CFG_DRV | \
+ PIN_CFG_SLEW)
+
+/*
+ * n indicates the number of pins in the port, a is the register index
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZV2M_GPIO_PORT_PACK(n, a, f) (((n) << 24) | ((a) << 16) | (f))
+#define RZV2M_GPIO_PORT_GET_PINCNT(x) FIELD_GET(GENMASK(31, 24), (x))
+#define RZV2M_GPIO_PORT_GET_INDEX(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_GPIO_PORT_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
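+
+/*
+ * For example, port P0 below is packed as RZV2M_GPIO_PORT_PACK(14, 0,
+ * PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS): GET_PINCNT() then yields
+ * 14 valid pins, GET_INDEX() register index 0, and GET_CFGS() the
+ * capability flags.
+ */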
+
+#define RZV2M_DEDICATED_PORT_IDX 22
+
+/*
+ * BIT(31) indicates dedicated pin, b is the register bits (b * 16)
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZV2M_SINGLE_PIN BIT(31)
+#define RZV2M_SINGLE_PIN_PACK(b, f) (RZV2M_SINGLE_PIN | \
+ ((RZV2M_DEDICATED_PORT_IDX) << 24) | \
+ ((b) << 16) | (f))
+#define RZV2M_SINGLE_PIN_GET_PORT(x) FIELD_GET(GENMASK(30, 24), (x))
+#define RZV2M_SINGLE_PIN_GET_BIT(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_SINGLE_PIN_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
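+
+/*
+ * For example, the dedicated pin "NAWPN" below is packed as
+ * RZV2M_SINGLE_PIN_PACK(0, ...): bit 31 flags it as dedicated, GET_PORT()
+ * yields RZV2M_DEDICATED_PORT_IDX (22) and GET_BIT() yields bit position 0.
+ */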
+
+#define RZV2M_PIN_ID_TO_PORT(id) ((id) / RZV2M_PINS_PER_PORT)
+#define RZV2M_PIN_ID_TO_PIN(id) ((id) % RZV2M_PINS_PER_PORT)
+
+#define DO(n) (0x00 + (n) * 0x40)
+#define OE(n) (0x04 + (n) * 0x40)
+#define IE(n) (0x08 + (n) * 0x40)
+#define PFSEL(n) (0x10 + (n) * 0x40)
+#define DI(n) (0x20 + (n) * 0x40)
+#define PUPD(n) (0x24 + (n) * 0x40)
+#define DRV(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x28 + (n) * 0x40) \
+ : 0x590)
+#define SR(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x2c + (n) * 0x40) \
+ : 0x594)
+#define DI_MSK(n) (0x30 + (n) * 0x40)
+#define EN_MSK(n) (0x34 + (n) * 0x40)
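+
+/*
+ * Note that the drive strength (DRV) and slew rate (SR) controls of the
+ * dedicated pins (register index RZV2M_DEDICATED_PORT_IDX) live in the
+ * shared registers at 0x590 and 0x594 rather than in a per-port slot.
+ */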
+
+#define PFC_MASK 0x07
+#define PUPD_MASK 0x03
+#define DRV_MASK 0x03
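+
+/* PUPD 2-bit encoding: 0 = pull-down, 2 = pull-up, else bias disabled */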
+
+struct rzv2m_dedicated_configs {
+ const char *name;
+ u32 config;
+};
+
+struct rzv2m_pinctrl_data {
+ const char * const *port_pins;
+ const u32 *port_pin_configs;
+ const struct rzv2m_dedicated_configs *dedicated_pins;
+ unsigned int n_port_pins;
+ unsigned int n_dedicated_pins;
+};
+
+struct rzv2m_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pins;
+
+ const struct rzv2m_pinctrl_data *data;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+
+ spinlock_t lock;
+};
+
+static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 };
+static const unsigned int drv_1_8V_group3_uA[] = { 1600, 3200, 6400, 9600 };
+static const unsigned int drv_SWIO_group2_3_3V_uA[] = { 9000, 11000, 13000, 18000 };
+static const unsigned int drv_3_3V_group_uA[] = { 2000, 4000, 8000, 12000 };
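+
+/*
+ * The 2-bit DRV field of a pin indexes into the table for its group;
+ * e.g. a 3.3 V group pin with DRV value 2 drives 8000 uA
+ * (drv_3_3V_group_uA[2]).
+ */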
+
+/* Helper for registers that have a write enable bit in the upper word */
+static void rzv2m_writel_we(void __iomem *addr, u8 shift, u8 value)
+{
+ writel((BIT(16) | value) << shift, addr);
+}
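+
+/*
+ * For instance, rzv2m_writel_we(base + OE(port), 3, 1) writes
+ * (BIT(16) | 1) << 3, i.e. the write-enable bit for position 3 plus the
+ * new value, so only OE bit 3 changes and the remaining bits keep their
+ * state.
+ */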
+
+static void rzv2m_pinctrl_set_pfc_mode(struct rzv2m_pinctrl *pctrl,
+ u8 port, u8 pin, u8 func)
+{
+ void __iomem *addr;
+
+ /* Mask input/output */
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 1);
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 1);
+
+ /* Select the function and set the write enable bits */
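+	/* Each PFSEL register packs four pins, one 4-bit field per pin */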
+ addr = pctrl->base + PFSEL(port) + (pin / 4) * 4;
+ writel(((PFC_MASK << 16) | func) << ((pin % 4) * 4), addr);
+
+ /* Unmask input/output */
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 0);
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 0);
+}
+
+static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ unsigned int i, *psel_val;
+ struct group_desc *group;
+ int *pins;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+ group = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!group)
+ return -EINVAL;
+
+ psel_val = func->data;
+ pins = group->pins;
+
+ for (i = 0; i < group->num_pins; i++) {
+ dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
+ RZV2M_PIN_ID_TO_PORT(pins[i]), RZV2M_PIN_ID_TO_PIN(pins[i]),
+ psel_val[i]);
+ rzv2m_pinctrl_set_pfc_mode(pctrl, RZV2M_PIN_ID_TO_PORT(pins[i]),
+ RZV2M_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ }
+
+ return 0;
+}
+
+static int rzv2m_map_add_config(struct pinctrl_map *map,
+ const char *group_or_pin,
+ enum pinctrl_map_type type,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ unsigned long *cfgs;
+
+ cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
+ GFP_KERNEL);
+ if (!cfgs)
+ return -ENOMEM;
+
+ map->type = type;
+ map->data.configs.group_or_pin = group_or_pin;
+ map->data.configs.configs = cfgs;
+ map->data.configs.num_configs = num_configs;
+
+ return 0;
+}
+
+static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps,
+ unsigned int *index)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_map *maps = *map;
+ unsigned int nmaps = *num_maps;
+ unsigned long *configs = NULL;
+ unsigned int *pins, *psel_val;
+ unsigned int num_pinmux = 0;
+ unsigned int idx = *index;
+ unsigned int num_pins, i;
+ unsigned int num_configs;
+ struct property *pinmux;
+ struct property *prop;
+ int ret, gsel, fsel;
+ const char **pin_fn;
+ const char *pin;
+
+ pinmux = of_find_property(np, "pinmux", NULL);
+ if (pinmux)
+ num_pinmux = pinmux->length / sizeof(u32);
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret == -EINVAL) {
+ num_pins = 0;
+ } else if (ret < 0) {
+ dev_err(pctrl->dev, "Invalid pins list in DT\n");
+ return ret;
+ } else {
+ num_pins = ret;
+ }
+
+ if (!num_pinmux && !num_pins)
+ return 0;
+
+ if (num_pinmux && num_pins) {
+ dev_err(pctrl->dev,
+			"DT node must contain either a pinmux or a pins property, but not both\n");
+ return -EINVAL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
+ if (ret < 0)
+ return ret;
+
+ if (num_pins && !num_configs) {
+ dev_err(pctrl->dev, "DT node must contain a config\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (num_pinmux)
+ nmaps += 1;
+
+ if (num_pins)
+ nmaps += num_pins;
+
+ maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ *map = maps;
+ *num_maps = nmaps;
+ if (num_pins) {
+ of_property_for_each_string(np, "pins", prop, pin) {
+ ret = rzv2m_map_add_config(&maps[idx], pin,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+ ret = 0;
+ goto done;
+ }
+
+ pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL);
+ psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val),
+ GFP_KERNEL);
+ pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL);
+ if (!pins || !psel_val || !pin_fn) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Collect pin locations and mux settings from DT properties */
+ for (i = 0; i < num_pinmux; ++i) {
+ u32 value;
+
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret)
+ goto done;
+ pins[i] = value & MUX_PIN_ID_MASK;
+ psel_val[i] = MUX_FUNC(value);
+ }
+
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
+ if (gsel < 0) {
+ ret = gsel;
+ goto done;
+ }
+
+	/*
+	 * Register a single group function where the 'data' is an array of
+	 * PSEL register values read from DT.
+	 */
+ pin_fn[0] = np->name;
+ fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
+ psel_val);
+ if (fsel < 0) {
+ ret = fsel;
+ goto remove_group;
+ }
+
+ maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ maps[idx].data.mux.group = np->name;
+ maps[idx].data.mux.function = np->name;
+ idx++;
+
+ dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+ ret = 0;
+ goto done;
+
+remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+done:
+ *index = idx;
+ kfree(configs);
+ return ret;
+}
+
+static void rzv2m_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ for (i = 0; i < num_maps; ++i) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ }
+ kfree(map);
+}
+
+static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device_node *child;
+ unsigned int index;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ index = 0;
+
+ for_each_child_of_node(np, child) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, child, map,
+ num_maps, &index);
+ if (ret < 0) {
+ of_node_put(child);
+ goto done;
+ }
+ }
+
+ if (*num_maps == 0) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, np, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps)
+ return 0;
+
+ dev_err(pctrl->dev, "no mapping found in node %pOF\n", np);
+ ret = -EINVAL;
+
+done:
+ if (ret < 0)
+ rzv2m_dt_free_map(pctldev, *map, *num_maps);
+
+ return ret;
+}
+
+static int rzv2m_validate_gpio_pin(struct rzv2m_pinctrl *pctrl,
+ u32 cfg, u32 port, u8 bit)
+{
+ u8 pincount = RZV2M_GPIO_PORT_GET_PINCNT(cfg);
+ u32 port_index = RZV2M_GPIO_PORT_GET_INDEX(cfg);
+ u32 data;
+
+ if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ return -EINVAL;
+
+ data = pctrl->data->port_pin_configs[port];
+ if (port_index != RZV2M_GPIO_PORT_GET_INDEX(data))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void rzv2m_rmw_pin_config(struct rzv2m_pinctrl *pctrl, u32 offset,
+ u8 shift, u32 mask, u32 val)
+{
+ void __iomem *addr = pctrl->base + offset;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(mask << shift);
+ writel(reg | (val << shift), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int rzv2m_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *config)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ unsigned int arg = 0;
+ u32 port;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN: {
+ enum pin_config_param bias;
+
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2-bits per pin */
+ bit *= 2;
+
+ switch ((readl(pctrl->base + PUPD(port)) >> bit) & PUPD_MASK) {
+ case 0:
+ bias = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ bias = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ bias = PIN_CONFIG_BIAS_DISABLE;
+ }
+
+ if (bias != param)
+ return -EINVAL;
+ break;
+ }
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ /* DRV uses 2-bits per pin */
+ bit *= 2;
+
+ val = (readl(pctrl->base + DRV(port)) >> bit) & DRV_MASK;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ arg = drv_1_8V_group2_uA[val];
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ arg = drv_1_8V_group3_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ arg = drv_SWIO_group2_3_3V_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ arg = drv_3_3V_group_uA[val];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ arg = readl(pctrl->base + SR(port)) & BIT(bit);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *_configs,
+ unsigned int num_configs)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ enum pin_config_param param;
+ u32 port;
+ unsigned int i;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(_configs[i]);
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2-bits per pin */
+ bit *= 2;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = 2;
+ break;
+ default:
+ val = 1;
+ }
+
+ rzv2m_rmw_pin_config(pctrl, PUPD(port), bit, PUPD_MASK, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ const unsigned int *drv_strengths;
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ drv_strengths = drv_1_8V_group2_uA;
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ drv_strengths = drv_1_8V_group3_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ drv_strengths = drv_SWIO_group2_3_3V_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ drv_strengths = drv_3_3V_group_uA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (index = 0; index < 4; index++) {
+ if (arg == drv_strengths[index])
+ break;
+ }
+ if (index >= 4)
+ return -EINVAL;
+
+ /* DRV uses 2-bits per pin */
+ bit *= 2;
+
+ rzv2m_rmw_pin_config(pctrl, DRV(port), bit, DRV_MASK, index);
+ break;
+ }
+
+ case PIN_CONFIG_SLEW_RATE: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ rzv2m_writel_we(pctrl->base + SR(port), bit, !arg);
+ break;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_set(pctldev, pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int i, npins, prev_config = 0;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_get(pctldev, pins[i], config);
+ if (ret)
+ return ret;
+
+ /* Check config matches previous pins */
+ if (i && prev_config != *config)
+ return -EOPNOTSUPP;
+
+ prev_config = *config;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops rzv2m_pinctrl_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rzv2m_dt_node_to_map,
+ .dt_free_map = rzv2m_dt_free_map,
+};
+
+static const struct pinmux_ops rzv2m_pinctrl_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rzv2m_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct pinconf_ops rzv2m_pinctrl_confops = {
+ .is_generic = true,
+ .pin_config_get = rzv2m_pinctrl_pinconf_get,
+ .pin_config_set = rzv2m_pinctrl_pinconf_set,
+ .pin_config_group_set = rzv2m_pinctrl_pinconf_group_set,
+ .pin_config_group_get = rzv2m_pinctrl_pinconf_group_get,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static int rzv2m_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int ret;
+
+ ret = pinctrl_gpio_request(chip->base + offset);
+ if (ret)
+ return ret;
+
+ rzv2m_pinctrl_set_pfc_mode(pctrl, port, bit, 0);
+
+ return 0;
+}
+
+static void rzv2m_gpio_set_direction(struct rzv2m_pinctrl *pctrl, u32 port,
+ u8 bit, bool output)
+{
+ rzv2m_writel_we(pctrl->base + OE(port), bit, output);
+ rzv2m_writel_we(pctrl->base + IE(port), bit, !output);
+}
+
+static int rzv2m_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ if (!(readl(pctrl->base + IE(port)) & BIT(bit)))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rzv2m_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set_direction(pctrl, port, bit, false);
+
+ return 0;
+}
+
+static void rzv2m_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_writel_we(pctrl->base + DO(port), bit, !!value);
+}
+
+static int rzv2m_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set(chip, offset, value);
+ rzv2m_gpio_set_direction(pctrl, port, bit, true);
+
+ return 0;
+}
+
+static int rzv2m_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int direction = rzv2m_gpio_get_direction(chip, offset);
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ return !!(readl(pctrl->base + DI(port)) & BIT(bit));
+ else
+ return !!(readl(pctrl->base + DO(port)) & BIT(bit));
+}
+
+static void rzv2m_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_gpio_free(chip->base + offset);
+
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ rzv2m_gpio_direction_input(chip, offset);
+}
+
+static const char * const rzv2m_gpio_names[] = {
+ "P0_0", "P0_1", "P0_2", "P0_3", "P0_4", "P0_5", "P0_6", "P0_7",
+ "P0_8", "P0_9", "P0_10", "P0_11", "P0_12", "P0_13", "P0_14", "P0_15",
+ "P1_0", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P1_7",
+ "P1_8", "P1_9", "P1_10", "P1_11", "P1_12", "P1_13", "P1_14", "P1_15",
+ "P2_0", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6", "P2_7",
+ "P2_8", "P2_9", "P2_10", "P2_11", "P2_12", "P2_13", "P2_14", "P2_15",
+ "P3_0", "P3_1", "P3_2", "P3_3", "P3_4", "P3_5", "P3_6", "P3_7",
+ "P3_8", "P3_9", "P3_10", "P3_11", "P3_12", "P3_13", "P3_14", "P3_15",
+ "P4_0", "P4_1", "P4_2", "P4_3", "P4_4", "P4_5", "P4_6", "P4_7",
+ "P4_8", "P4_9", "P4_10", "P4_11", "P4_12", "P4_13", "P4_14", "P4_15",
+ "P5_0", "P5_1", "P5_2", "P5_3", "P5_4", "P5_5", "P5_6", "P5_7",
+ "P5_8", "P5_9", "P5_10", "P5_11", "P5_12", "P5_13", "P5_14", "P5_15",
+ "P6_0", "P6_1", "P6_2", "P6_3", "P6_4", "P6_5", "P6_6", "P6_7",
+ "P6_8", "P6_9", "P6_10", "P6_11", "P6_12", "P6_13", "P6_14", "P6_15",
+ "P7_0", "P7_1", "P7_2", "P7_3", "P7_4", "P7_5", "P7_6", "P7_7",
+ "P7_8", "P7_9", "P7_10", "P7_11", "P7_12", "P7_13", "P7_14", "P7_15",
+ "P8_0", "P8_1", "P8_2", "P8_3", "P8_4", "P8_5", "P8_6", "P8_7",
+ "P8_8", "P8_9", "P8_10", "P8_11", "P8_12", "P8_13", "P8_14", "P8_15",
+ "P9_0", "P9_1", "P9_2", "P9_3", "P9_4", "P9_5", "P9_6", "P9_7",
+ "P9_8", "P9_9", "P9_10", "P9_11", "P9_12", "P9_13", "P9_14", "P9_15",
+ "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7",
+ "P10_8", "P10_9", "P10_10", "P10_11", "P10_12", "P10_13", "P10_14", "P10_15",
+ "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7",
+ "P11_8", "P11_9", "P11_10", "P11_11", "P11_12", "P11_13", "P11_14", "P11_15",
+ "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7",
+ "P12_8", "P12_9", "P12_10", "P12_11", "P12_12", "P12_13", "P12_14", "P12_15",
+ "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7",
+ "P13_8", "P13_9", "P13_10", "P13_11", "P13_12", "P13_13", "P13_14", "P13_15",
+ "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7",
+ "P14_8", "P14_9", "P14_10", "P14_11", "P14_12", "P14_13", "P14_14", "P14_15",
+ "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7",
+ "P15_8", "P15_9", "P15_10", "P15_11", "P15_12", "P15_13", "P15_14", "P15_15",
+ "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7",
+ "P16_8", "P16_9", "P16_10", "P16_11", "P16_12", "P16_13", "P16_14", "P16_15",
+ "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7",
+ "P17_8", "P17_9", "P17_10", "P17_11", "P17_12", "P17_13", "P17_14", "P17_15",
+ "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7",
+ "P18_8", "P18_9", "P18_10", "P18_11", "P18_12", "P18_13", "P18_14", "P18_15",
+ "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7",
+ "P19_8", "P19_9", "P19_10", "P19_11", "P19_12", "P19_13", "P19_14", "P19_15",
+ "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7",
+ "P20_8", "P20_9", "P20_10", "P20_11", "P20_12", "P20_13", "P20_14", "P20_15",
+ "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7",
+ "P21_8", "P21_9", "P21_10", "P21_11", "P21_12", "P21_13", "P21_14", "P21_15",
+};
+
+static const u32 rzv2m_gpio_configs[] = {
+ RZV2M_GPIO_PORT_PACK(14, 0, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 1, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 2, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 3, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 4, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 5, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 6, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(6, 7, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 8, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 9, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 10, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 11, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 12, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 13, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 14, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 15, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(14, 16, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(1, 17, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(0, 18, 0),
+ RZV2M_GPIO_PORT_PACK(0, 19, 0),
+ RZV2M_GPIO_PORT_PACK(3, 20, PIN_CFG_GRP_1_8V_2 | PIN_CFG_DRV),
+ RZV2M_GPIO_PORT_PACK(1, 21, PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW),
+};
+
+static const struct rzv2m_dedicated_configs rzv2m_dedicated_pins[] = {
+ { "NAWPN", RZV2M_SINGLE_PIN_PACK(0,
+ (PIN_CFG_GRP_SWIO_2 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM0CLK", RZV2M_SINGLE_PIN_PACK(1,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM1CLK", RZV2M_SINGLE_PIN_PACK(2,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETDO", RZV2M_SINGLE_PIN_PACK(5,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETMS", RZV2M_SINGLE_PIN_PACK(6,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "PCRSTOUTB", RZV2M_SINGLE_PIN_PACK(12,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "USPWEN", RZV2M_SINGLE_PIN_PACK(14,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+};
+
+static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct device_node *np = pctrl->dev->of_node;
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ const char *name = dev_name(pctrl->dev);
+ struct of_phandle_args of_args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
+ if (ret) {
+ dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
+ return ret;
+ }
+
+ if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ of_args.args[2] != pctrl->data->n_port_pins) {
+ dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
+ return -EINVAL;
+ }
+
+ chip->names = pctrl->data->port_pins;
+ chip->request = rzv2m_gpio_request;
+ chip->free = rzv2m_gpio_free;
+ chip->get_direction = rzv2m_gpio_get_direction;
+ chip->direction_input = rzv2m_gpio_direction_input;
+ chip->direction_output = rzv2m_gpio_direction_output;
+ chip->get = rzv2m_gpio_get;
+ chip->set = rzv2m_gpio_set;
+ chip->label = name;
+ chip->parent = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->base = -1;
+ chip->ngpio = of_args.args[2];
+
+ pctrl->gpio_range.id = 0;
+ pctrl->gpio_range.pin_base = 0;
+ pctrl->gpio_range.base = 0;
+ pctrl->gpio_range.npins = chip->ngpio;
+ pctrl->gpio_range.name = chip->label;
+ pctrl->gpio_range.gc = chip;
+ ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO controller\n");
+ return ret;
+ }
+
+ dev_dbg(pctrl->dev, "Registered gpio controller\n");
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int i, j;
+ u32 *pin_data;
+ int ret;
+
+ pctrl->desc.name = DRV_NAME;
+ pctrl->desc.npins = pctrl->data->n_port_pins + pctrl->data->n_dedicated_pins;
+ pctrl->desc.pctlops = &rzv2m_pinctrl_pctlops;
+ pctrl->desc.pmxops = &rzv2m_pinctrl_pmxops;
+ pctrl->desc.confops = &rzv2m_pinctrl_confops;
+ pctrl->desc.owner = THIS_MODULE;
+
+ pins = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pin_data = devm_kcalloc(pctrl->dev, pctrl->desc.npins,
+ sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data)
+ return -ENOMEM;
+
+ pctrl->pins = pins;
+ pctrl->desc.pins = pins;
+
+ for (i = 0, j = 0; i < pctrl->data->n_port_pins; i++) {
+ pins[i].number = i;
+ pins[i].name = pctrl->data->port_pins[i];
+ if (i && !(i % RZV2M_PINS_PER_PORT))
+ j++;
+ pin_data[i] = pctrl->data->port_pin_configs[j];
+ pins[i].drv_data = &pin_data[i];
+ }
+
+ for (i = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ unsigned int index = pctrl->data->n_port_pins + i;
+
+ pins[index].number = index;
+ pins[index].name = pctrl->data->dedicated_pins[i].name;
+ pin_data[index] = pctrl->data->dedicated_pins[i].config;
+ pins[index].drv_data = &pin_data[index];
+ }
+
+ ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
+ &pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl enable failed\n");
+ return ret;
+ }
+
+ ret = rzv2m_gpio_register(pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rzv2m_pinctrl_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int rzv2m_pinctrl_probe(struct platform_device *pdev)
+{
+ struct rzv2m_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+
+ pctrl->data = of_device_get_match_data(&pdev->dev);
+ if (!pctrl->data)
+ return -EINVAL;
+
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ pctrl->clk = devm_clk_get(pctrl->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ ret = PTR_ERR(pctrl->clk);
+ dev_err(pctrl->dev, "failed to get GPIO clk : %i\n", ret);
+ return ret;
+ }
+
+ spin_lock_init(&pctrl->lock);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to enable GPIO clk: %i\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2m_pinctrl_clk_disable,
+ pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev,
+ "failed to register GPIO clk disable action, %i\n",
+ ret);
+ return ret;
+ }
+
+ ret = rzv2m_pinctrl_register(pctrl);
+ if (ret)
+ return ret;
+
+ dev_info(pctrl->dev, "%s support registered\n", DRV_NAME);
+ return 0;
+}
+
+static struct rzv2m_pinctrl_data r9a09g011_data = {
+ .port_pins = rzv2m_gpio_names,
+ .port_pin_configs = rzv2m_gpio_configs,
+ .dedicated_pins = rzv2m_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(rzv2m_gpio_configs) * RZV2M_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzv2m_dedicated_pins),
+};
+
+static const struct of_device_id rzv2m_pinctrl_of_table[] = {
+ {
+ .compatible = "renesas,r9a09g011-pinctrl",
+ .data = &r9a09g011_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzv2m_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(rzv2m_pinctrl_of_table),
+ },
+ .probe = rzv2m_pinctrl_probe,
+};
+
+static int __init rzv2m_pinctrl_init(void)
+{
+ return platform_driver_register(&rzv2m_pinctrl_driver);
+}
+core_initcall(rzv2m_pinctrl_init);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Pin and GPIO controller driver for RZ/V2M");
+MODULE_LICENSE("GPL");
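A note on the per-pin loop in rzv2m_pinctrl_register() above: the port index
j advances once every RZV2M_PINS_PER_PORT pins, so each pin inherits the
packed config word of its port. A minimal standalone sketch of that mapping
(the value of RZV2M_PINS_PER_PORT is defined earlier in the driver and not
visible in this hunk; 16 is assumed here for illustration):

    /* Sketch of the pin -> per-port config mapping from
     * rzv2m_pinctrl_register(); RZV2M_PINS_PER_PORT = 16 is an assumption. */
    #include <stdio.h>

    #define RZV2M_PINS_PER_PORT 16

    int main(void)
    {
        unsigned int port_configs[] = { 0xA, 0xB, 0xC }; /* stand-in values */
        unsigned int i, j;

        for (i = 0, j = 0; i < 3 * RZV2M_PINS_PER_PORT; i++) {
            if (i && !(i % RZV2M_PINS_PER_PORT))
                j++; /* crossed into the next port */
            printf("pin %2u -> port %u config 0x%X\n", i, j, port_configs[j]);
        }
        return 0;
    }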
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 12bc279f5733..0fcb29ab0c84 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -325,6 +325,7 @@ extern const struct sh_pfc_soc_info r8a77990_pinmux_info;
extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info r8a779a0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779f0_pinmux_info;
+extern const struct sh_pfc_soc_info r8a779g0_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
@@ -492,9 +493,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0)
-#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_13(bank, fn, sfx, cfg) \
PORT_GP_CFG_12(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 12, fn, sfx, cfg)
+#define PORT_GP_13(bank, fn, sfx) PORT_GP_CFG_13(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_13(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 13, fn, sfx, cfg)
#define PORT_GP_14(bank, fn, sfx) PORT_GP_CFG_14(bank, fn, sfx, 0)
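The hunk above splits the old 14-pin macro so that a new PORT_GP_CFG_13()
step sits between 12 and 14, with each width reusing the previous one and
appending a single PORT_GP_CFG_1() entry. A self-contained analogue of that
chaining (names and the integer payload are illustrative, not the real
sh_pfc.h definitions):

    /* Each macro expands to the previous width plus one entry, so adding a
     * 13-pin step leaves the 14-pin expansion unchanged. */
    #include <stdio.h>

    #define CFG_1(bank, pin)  ((bank) * 100 + (pin))
    #define CFG_12(bank)      CFG_1(bank, 11)              /* pins 0..11 elided */
    #define CFG_13(bank)      CFG_12(bank), CFG_1(bank, 12)
    #define CFG_14(bank)      CFG_13(bank), CFG_1(bank, 13)

    int main(void)
    {
        int pins[] = { CFG_14(2) };  /* expands to 211, 212, 213 */
        size_t i;

        for (i = 0; i < sizeof(pins) / sizeof(pins[0]); i++)
            printf("%d\n", pins[i]);
        return 0;
    }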
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 6d7ca1758292..a8212fc126bf 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -27,8 +27,6 @@
#include <linux/soc/samsung/exynos-pmu.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -173,7 +171,7 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_EINT << shift;
+ con |= EXYNOS_PIN_CON_FUNC_EINT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
@@ -196,7 +194,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_INPUT << shift;
+ con |= PIN_CON_FUNC_INPUT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index bfad1ced8017..7bd6d82c9f36 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -16,6 +16,9 @@
#ifndef __PINCTRL_SAMSUNG_EXYNOS_H
#define __PINCTRL_SAMSUNG_EXYNOS_H
+/* Values for the pin CON register */
+#define EXYNOS_PIN_CON_FUNC_EINT 0xf
+
/* External GPIO and wakeup interrupt related definitions */
#define EXYNOS_GPIO_ECON_OFFSET 0x700
#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 26d309d2516d..4837bceb767b 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -26,8 +26,6 @@
#include <linux/of_device.h>
#include <linux/spinlock.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "../core.h"
#include "pinctrl-samsung.h"
@@ -614,7 +612,7 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
data = readl(reg);
data &= ~(mask << shift);
if (!input)
- data |= EXYNOS_PIN_FUNC_OUTPUT << shift;
+ data |= PIN_CON_FUNC_OUTPUT << shift;
writel(data, reg);
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index fc6f5199c548..9af93e3d8d9f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -53,6 +53,14 @@ enum pincfg_type {
#define PINCFG_UNPACK_TYPE(cfg) ((cfg) & PINCFG_TYPE_MASK)
#define PINCFG_UNPACK_VALUE(cfg) (((cfg) & PINCFG_VALUE_MASK) >> \
PINCFG_VALUE_SHIFT)
+/*
+ * Values for the pin CON register, choosing pin function.
+ * The basic set (input and output) are same between: S3C24xx, S3C64xx, S5PV210,
+ * Exynos ARMv7, Exynos ARMv8, Tesla FSD.
+ */
+#define PIN_CON_FUNC_INPUT 0x0
+#define PIN_CON_FUNC_OUTPUT 0x1
+
/**
* enum eint_type - possible external interrupt types.
* @EINT_TYPE_NONE: bank does not support external interrupts
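The Exynos hunks above swap the dt-binding constants for driver-local
PIN_CON_FUNC_* values while keeping the same read-modify-write on the pin
CON register. A minimal sketch of that update pattern, assuming a 4-bit
function field per pin (the field width itself is not shown in these hunks):

    #include <stdio.h>
    #include <stdint.h>

    #define PIN_CON_FUNC_INPUT   0x0
    #define PIN_CON_FUNC_OUTPUT  0x1

    static uint32_t set_pin_func(uint32_t con, unsigned int shift,
                                 uint32_t mask, uint32_t func)
    {
        con &= ~(mask << shift);   /* clear the pin's function field */
        con |= func << shift;      /* program the new function */
        return con;
    }

    int main(void)
    {
        /* Pin 3 of a bank with 4-bit fields lives at shift 12. */
        uint32_t con = 0xffffffff;

        con = set_pin_func(con, 12, 0xf, PIN_CON_FUNC_OUTPUT);
        printf("con = 0x%08x\n", (unsigned int)con);   /* 0xffff1fff */
        return 0;
    }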
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 33751a6a0757..a78fdbbdfc0c 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -29,7 +29,6 @@ config PINCTRL_SUN6I_A31
config PINCTRL_SUN6I_A31_R
bool "Support for the Allwinner A31 R-PIO"
default MACH_SUN6I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23
@@ -55,7 +54,6 @@ config PINCTRL_SUN8I_A83T_R
config PINCTRL_SUN8I_A23_R
bool "Support for the Allwinner A23 and A33 R-PIO"
default MACH_SUN8I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_H3
@@ -81,7 +79,11 @@ config PINCTRL_SUN9I_A80
config PINCTRL_SUN9I_A80_R
bool "Support for the Allwinner A80 R-PIO"
default MACH_SUN9I
- depends on RESET_CONTROLLER
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN20I_D1
+ bool "Support for the Allwinner D1 PIO"
+ default MACH_SUN8I || (RISCV && ARCH_SUNXI)
select PINCTRL_SUNXI
config PINCTRL_SUN50I_A64
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index d3440c42b9d6..2ff5a55927ad 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PINCTRL_SUN8I_A83T_R) += pinctrl-sun8i-a83t-r.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
+obj-$(CONFIG_PINCTRL_SUN20I_D1) += pinctrl-sun20i-d1.o
obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
new file mode 100644
index 000000000000..40858b881298
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner D1 SoC pinctrl driver.
+ *
+ * Copyright (c) 2020 wuyan@allwinnertech.com
+ * Copyright (c) 2021-2022 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin d1_pins[] = {
+ /* PB */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "ir"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x8, "spdif"), /* OUT */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm4"),
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x8, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x8, "bist0"), /* BIST_RESULT0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x8, "bist1"), /* BIST_RESULT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x3, "pwm6"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x3, "pwm7"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x3, "pwm2"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x3, "pwm0"),
+ SUNXI_FUNCTION(0x4, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x7, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 12)),
+ /* PC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x6, "pll"), /* DBG-CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 7)),
+ /* PD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0P */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1P */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKP */
+ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKN */
+ SUNXI_FUNCTION(0x5, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2P */
+ SUNXI_FUNCTION(0x5, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2N */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3P */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3N */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x5, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 18)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x5, "pwm3"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 19)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 20)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 21)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x3, "ir"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 22)),
+ /* PE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCTL/CRS_DV */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x4, "pwm2"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDC */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x4, "pwm3"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDIO */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x4, "pwm4"),
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x3, "ncsi0"), /* FIELD */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "ir"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 17)),
+ /* PF */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x4, "ir"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 6)),
+ /* PG */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCTRL/CRS_DV */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD0 */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD1 */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCK */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD0 */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD1 */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD3 */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD2 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD3 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* LRCK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "pwm0"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* CLKIN/RXER */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION(0x6, "ledc"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDC */
+ SUNXI_FUNCTION(0x5, "i2s1_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDIO */
+ SUNXI_FUNCTION(0x5, "i2s1_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir"), /* RX */
+ SUNXI_FUNCTION(0x3, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION(0x4, "pwm5"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x7, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x7, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 18)),
+};
+
+static const unsigned int d1_irq_bank_map[] = { 1, 2, 3, 4, 5, 6 };
+
+static const struct sunxi_pinctrl_desc d1_pinctrl_data = {
+ .pins = d1_pins,
+ .npins = ARRAY_SIZE(d1_pins),
+ .irq_banks = ARRAY_SIZE(d1_irq_bank_map),
+ .irq_bank_map = d1_irq_bank_map,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
+};
+
+static int d1_pinctrl_probe(struct platform_device *pdev)
+{
+ unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev, &d1_pinctrl_data, variant);
+}
+
+static const struct of_device_id d1_pinctrl_match[] = {
+ {
+ .compatible = "allwinner,sun20i-d1-pinctrl",
+ .data = (void *)PINCTRL_SUN20I_D1
+ },
+ {}
+};
+
+static struct platform_driver d1_pinctrl_driver = {
+ .probe = d1_pinctrl_probe,
+ .driver = {
+ .name = "sun20i-d1-pinctrl",
+ .of_match_table = d1_pinctrl_match,
+ },
+};
+builtin_platform_driver(d1_pinctrl_driver);
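The D1 pin table above names pins by bank letter and index. A rough sketch
of the global numbering behind SUNXI_PINCTRL_PIN(), assuming the
conventional sunxi scheme where each bank spans PINS_PER_BANK (32) numbers
and PA is bank 0 (the macro itself lives in pinctrl-sunxi.h, outside this
hunk, so treat this as an assumption):

    #include <stdio.h>

    #define PINS_PER_BANK 32

    static unsigned int sunxi_pin_number(char bank, unsigned int pin)
    {
        return (bank - 'A') * PINS_PER_BANK + pin;
    }

    int main(void)
    {
        printf("PB0  = %u\n", sunxi_pin_number('B', 0));   /* 32 */
        printf("PG18 = %u\n", sunxi_pin_number('G', 18));  /* 210 */
        return 0;
    }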
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index 21054fcacd34..afc1f5df7545 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -82,6 +82,7 @@ static const struct sunxi_pinctrl_desc a100_r_pinctrl_data = {
.npins = ARRAY_SIZE(a100_r_pins),
.pin_base = PL_BASE,
.irq_banks = 1,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index e69f6da40dc0..f682e0e4244d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -684,7 +684,7 @@ static const struct sunxi_pinctrl_desc a100_pinctrl_data = {
.npins = ARRAY_SIZE(a100_pins),
.irq_banks = 7,
.irq_bank_map = a100_irq_bank_map,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
index e69c8dae121a..ef261eccda56 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
@@ -24,7 +24,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index c7d90c44e87a..3aba0aec3d78 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -107,6 +106,7 @@ static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun50i_h6_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};
static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
index 8e4f10ab96ce..c39ea46046c2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index 152b71226a80..d6ca720ee8d8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -525,7 +525,7 @@ static const struct sunxi_pinctrl_desc h616_pinctrl_data = {
.irq_banks = ARRAY_SIZE(h616_irq_bank_map),
.irq_bank_map = h616_irq_bank_map,
.irq_read_needs_mux = true,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int h616_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index a00246d3dd49..2486cdf345e1 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -17,7 +17,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -111,26 +110,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- dev_err(&pdev->dev, "Reset controller missing\n");
- return PTR_ERR(rstc);
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun6i_a31_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun6i_a31_r_pinctrl_data);
}
static const struct of_device_id sun6i_a31_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 9e5b61449999..4fae12c905b7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -98,29 +97,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret == -EPROBE_DEFER)
- return ret;
- dev_err(&pdev->dev, "Reset controller missing err=%d\n", ret);
- return ret;
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun8i_a23_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun8i_a23_r_pinctrl_data);
}
static const struct of_device_id sun8i_a23_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
index 6531cf67958e..0cb6c1a970c9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
@@ -27,7 +27,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index a191a65217ac..f11cb5bba0f7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index dd928402af99..6c04027d0dd9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -46,6 +46,67 @@ static struct lock_class_key sunxi_pinctrl_irq_request_class;
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
static struct irq_chip sunxi_pinctrl_level_irq_chip;
+/*
+ * The sunXi PIO registers are organized as a series of banks, with registers
+ * for each bank in the following order:
+ * - Mux config
+ * - Data value
+ * - Drive level
+ * - Pull direction
+ *
+ * Multiple consecutive registers are used for fields wider than one bit.
+ *
+ * The following functions calculate the register and the bit offset to access.
+ * They take a pin number which is relative to the start of the current device.
+ */
+static void sunxi_mux_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + MUX_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(MUX_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_data_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * DATA_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + DATA_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(DATA_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_dlevel_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * pctl->dlevel_field_width;
+
+ *reg = bank * pctl->bank_mem_size + DLEVEL_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(pctl->dlevel_field_width) - 1) << *shift;
+}
+
+static void sunxi_pull_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * PULL_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + pctl->pull_regs_offset +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(PULL_FIELD_WIDTH) - 1) << *shift;
+}
+
static struct sunxi_pinctrl_group *
sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
{
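To make the new helper math concrete, here is a standalone replica of
sunxi_mux_reg() above for a pre-D1 SoC, using the constants added to
pinctrl-sunxi.h further below; pin 37 (bank B, pin 5) lands at register
0x24, shift 20, mask 0xf00000:

    #include <stdio.h>
    #include <stdint.h>

    #define PINS_PER_BANK    32
    #define BANK_MEM_SIZE    0x24
    #define MUX_REGS_OFFSET  0x0
    #define MUX_FIELD_WIDTH  4

    static void mux_reg(uint32_t pin, uint32_t *reg, uint32_t *shift,
                        uint32_t *mask)
    {
        uint32_t bank = pin / PINS_PER_BANK;
        uint32_t offset = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;

        *reg = bank * BANK_MEM_SIZE + MUX_REGS_OFFSET + offset / 32 * 4;
        *shift = offset % 32;
        *mask = ((1u << MUX_FIELD_WIDTH) - 1) << *shift;
    }

    int main(void)
    {
        uint32_t reg, shift, mask;

        mux_reg(37, &reg, &shift, &mask);   /* bank B, pin 5 */
        printf("reg=0x%x shift=%u mask=0x%x\n", reg, shift, mask);
        return 0;
    }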
@@ -451,22 +512,19 @@ static const struct pinctrl_ops sunxi_pctrl_ops = {
.get_group_pins = sunxi_pctrl_get_group_pins,
};
-static int sunxi_pconf_reg(unsigned pin, enum pin_config_param param,
- u32 *offset, u32 *shift, u32 *mask)
+static int sunxi_pconf_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, enum pin_config_param param,
+ u32 *reg, u32 *shift, u32 *mask)
{
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH:
- *offset = sunxi_dlevel_reg(pin);
- *shift = sunxi_dlevel_offset(pin);
- *mask = DLEVEL_PINS_MASK;
+ sunxi_dlevel_reg(pctl, pin, reg, shift, mask);
break;
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_DISABLE:
- *offset = sunxi_pull_reg(pin);
- *shift = sunxi_pull_offset(pin);
- *mask = PULL_PINS_MASK;
+ sunxi_pull_reg(pctl, pin, reg, shift, mask);
break;
default:
@@ -481,17 +539,17 @@ static int sunxi_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- u32 offset, shift, mask, val;
+ u32 reg, shift, mask, val;
u16 arg;
int ret;
pin -= pctl->desc->pin_base;
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
- val = (readl(pctl->membase + offset) >> shift) & mask;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
switch (pinconf_to_config_param(*config)) {
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -547,16 +605,15 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
pin -= pctl->desc->pin_base;
for (i = 0; i < num_configs; i++) {
+ u32 arg, reg, shift, mask, val;
enum pin_config_param param;
unsigned long flags;
- u32 offset, shift, mask, reg;
- u32 arg, val;
int ret;
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
@@ -593,9 +650,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
}
raw_spin_lock_irqsave(&pctl->lock, flags);
- reg = readl(pctl->membase + offset);
- reg &= ~(mask << shift);
- writel(reg | val << shift, pctl->membase + offset);
+ writel((readl(pctl->membase + reg) & ~mask) | val << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
} /* for each config */
@@ -624,7 +680,7 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
unsigned pin,
struct regulator *supply)
{
- unsigned short bank = pin / PINS_PER_BANK;
+ unsigned short bank;
unsigned long flags;
u32 val, reg;
int uV;
@@ -640,6 +696,9 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
if (uV == 0)
return 0;
+ pin -= pctl->desc->pin_base;
+ bank = pin / PINS_PER_BANK;
+
switch (pctl->desc->io_bias_cfg_variant) {
case BIAS_VOLTAGE_GRP_CONFIG:
/*
@@ -657,12 +716,20 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
else
val = 0xD; /* 3.3V */
- pin -= pctl->desc->pin_base;
-
reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
reg &= ~IO_BIAS_MASK;
writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
return 0;
+ case BIAS_VOLTAGE_PIO_POW_MODE_CTL:
+ val = uV > 1800000 && uV <= 2500000 ? BIT(bank) : 0;
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ reg = readl(pctl->membase + PIO_POW_MOD_CTL_REG);
+ reg &= ~BIT(bank);
+ writel(reg | val, pctl->membase + PIO_POW_MOD_CTL_REG);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+
+ fallthrough;
case BIAS_VOLTAGE_PIO_POW_MODE_SEL:
val = uV <= 1800000 ? 1 : 0;
@@ -710,16 +777,16 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ u32 reg, shift, mask;
unsigned long flags;
- u32 val, mask;
+
+ pin -= pctl->desc->pin_base;
+ sunxi_mux_reg(pctl, pin, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- pin -= pctl->desc->pin_base;
- val = readl(pctl->membase + sunxi_mux_reg(pin));
- mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
- writel((val & ~mask) | config << sunxi_mux_offset(pin),
- pctl->membase + sunxi_mux_reg(pin));
+ writel((readl(pctl->membase + reg) & ~mask) | config << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
@@ -852,43 +919,43 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
bool set_mux = pctl->desc->irq_read_needs_mux &&
gpiochip_line_is_irq(chip, offset);
u32 pin = offset + chip->base;
- u32 val;
+ u32 reg, shift, mask, val;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_INPUT);
- val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_IRQ);
- return !!val;
+ return val;
}
static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
+ u32 reg, shift, mask, val;
unsigned long flags;
- u32 regval;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- regval = readl(pctl->membase + reg);
+ val = readl(pctl->membase + reg);
if (value)
- regval |= BIT(index);
+ val |= mask;
else
- regval &= ~(BIT(index));
+ val &= ~mask;
- writel(regval, pctl->membase + reg);
+ writel(val, pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
@@ -1232,11 +1299,11 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
/*
* Find an upper bound for the maximum number of functions: in
- * the worst case we have gpio_in, gpio_out, irq and up to four
+ * the worst case we have gpio_in, gpio_out, irq and up to seven
* special functions per pin, plus one entry for the sentinel.
* We'll reallocate that later anyway.
*/
- pctl->functions = kcalloc(4 * pctl->ngroups + 4,
+ pctl->functions = kcalloc(7 * pctl->ngroups + 4,
sizeof(*pctl->functions),
GFP_KERNEL);
if (!pctl->functions)
@@ -1429,6 +1496,15 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
pctl->variant = variant;
+ if (pctl->variant >= PINCTRL_SUN20I_D1) {
+ pctl->bank_mem_size = D1_BANK_MEM_SIZE;
+ pctl->pull_regs_offset = D1_PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = D1_DLEVEL_FIELD_WIDTH;
+ } else {
+ pctl->bank_mem_size = BANK_MEM_SIZE;
+ pctl->pull_regs_offset = PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = DLEVEL_FIELD_WIDTH;
+ }
pctl->irq_array = devm_kcalloc(&pdev->dev,
IRQ_PER_BANK * pctl->desc->irq_banks,
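
Two details of the rework above are easy to miss. First, the comparison pctl->variant >= PINCTRL_SUN20I_D1 works because the variant flags are single bits defined in ascending order, so the numeric comparison selects the D1 and any later variant with the updated register layout. Second, sunxi_pmx_set() and the GPIO paths now call table-driven helpers (sunxi_mux_reg(), sunxi_data_reg()) that return the register offset, field shift and field mask for a pin; their implementation lives elsewhere in the patch, but given the new per-variant bank_mem_size and field widths it has to compute something along the lines of this stand-alone sketch (names and parameterization are illustrative, not the patch's actual code):

#include <stdio.h>

#define PINS_PER_BANK 32

/*
 * Illustrative only: the real helpers take a struct sunxi_pinctrl and
 * pick bank_mem_size and the field width per variant; here they are
 * passed in directly.  With 32-bit registers, 32/width fields fit per
 * register.
 */
static void sunxi_field_reg(unsigned int pin, unsigned int bank_mem_size,
                            unsigned int regs_offset, unsigned int width,
                            unsigned int *reg, unsigned int *shift,
                            unsigned int *mask)
{
        unsigned int bank = pin / PINS_PER_BANK;
        unsigned int offset = pin % PINS_PER_BANK;
        unsigned int pins_per_reg = 32 / width;

        *reg = bank * bank_mem_size + regs_offset + offset / pins_per_reg * 4;
        *shift = offset % pins_per_reg * width;
        *mask = ((1U << width) - 1) << *shift;
}

int main(void)
{
        unsigned int reg, shift, mask;

        /* Mux field of pin PB3 (pin 35) on the classic 0x24-stride layout. */
        sunxi_field_reg(35, 0x24, 0x0, 4, &reg, &shift, &mask);
        printf("reg=0x%02x shift=%u mask=0x%08x\n", reg, shift, mask);
        /* reg=0x24 shift=12 mask=0x0000f000 */
        return 0;
}

This reproduces what the removed open-coded sunxi_mux_reg()/sunxi_mux_offset() pair computed for the same pin.
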
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index a32bb5bcb754..a87a2f944d60 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -36,23 +36,19 @@
#define BANK_MEM_SIZE 0x24
#define MUX_REGS_OFFSET 0x0
+#define MUX_FIELD_WIDTH 4
#define DATA_REGS_OFFSET 0x10
+#define DATA_FIELD_WIDTH 1
#define DLEVEL_REGS_OFFSET 0x14
+#define DLEVEL_FIELD_WIDTH 2
#define PULL_REGS_OFFSET 0x1c
+#define PULL_FIELD_WIDTH 2
+
+#define D1_BANK_MEM_SIZE 0x30
+#define D1_DLEVEL_FIELD_WIDTH 4
+#define D1_PULL_REGS_OFFSET 0x24
#define PINS_PER_BANK 32
-#define MUX_PINS_PER_REG 8
-#define MUX_PINS_BITS 4
-#define MUX_PINS_MASK 0x0f
-#define DATA_PINS_PER_REG 32
-#define DATA_PINS_BITS 1
-#define DATA_PINS_MASK 0x01
-#define DLEVEL_PINS_PER_REG 16
-#define DLEVEL_PINS_BITS 2
-#define DLEVEL_PINS_MASK 0x03
-#define PULL_PINS_PER_REG 16
-#define PULL_PINS_BITS 2
-#define PULL_PINS_MASK 0x03
#define IRQ_PER_BANK 32
@@ -96,8 +92,11 @@
#define PINCTRL_SUN8I_R40 BIT(8)
#define PINCTRL_SUN8I_V3 BIT(9)
#define PINCTRL_SUN8I_V3S BIT(10)
+/* Variants below here have an updated register layout. */
+#define PINCTRL_SUN20I_D1 BIT(11)
#define PIO_POW_MOD_SEL_REG 0x340
+#define PIO_POW_MOD_CTL_REG 0x344
enum sunxi_desc_bias_voltage {
BIAS_VOLTAGE_NONE,
@@ -111,6 +110,12 @@ enum sunxi_desc_bias_voltage {
* register, as seen on H6 SoC, for example.
*/
BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ /*
+ * Bias voltage is set through PIO_POW_MOD_SEL_REG
+ * and PIO_POW_MOD_CTL_REG registers, as seen on
+ * the A100 and D1 SoCs, for example.
+ */
+ BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
struct sunxi_desc_function {
@@ -170,6 +175,9 @@ struct sunxi_pinctrl {
raw_spinlock_t lock;
struct pinctrl_dev *pctl_dev;
unsigned long variant;
+ u32 bank_mem_size;
+ u32 pull_regs_offset;
+ u32 dlevel_field_width;
};
#define SUNXI_PIN(_pin, ...) \
@@ -215,83 +223,6 @@ struct sunxi_pinctrl {
.irqnum = _irq, \
}
-/*
- * The sunXi PIO registers are organized as is:
- * 0x00 - 0x0c Muxing values.
- * 8 pins per register, each pin having a 4bits value
- * 0x10 Pin values
- * 32 bits per register, each pin corresponding to one bit
- * 0x14 - 0x18 Drive level
- * 16 pins per register, each pin having a 2bits value
- * 0x1c - 0x20 Pull-Up values
- * 16 pins per register, each pin having a 2bits value
- *
- * This is for the first bank. Each bank will have the same layout,
- * with an offset being a multiple of 0x24.
- *
- * The following functions calculate from the pin number the register
- * and the bit offset that we should access.
- */
-static inline u32 sunxi_mux_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += MUX_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / MUX_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_mux_offset(u16 pin)
-{
- u32 pin_num = pin % MUX_PINS_PER_REG;
- return pin_num * MUX_PINS_BITS;
-}
-
-static inline u32 sunxi_data_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DATA_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DATA_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_data_offset(u16 pin)
-{
- u32 pin_num = pin % DATA_PINS_PER_REG;
- return pin_num * DATA_PINS_BITS;
-}
-
-static inline u32 sunxi_dlevel_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DLEVEL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DLEVEL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_dlevel_offset(u16 pin)
-{
- u32 pin_num = pin % DLEVEL_PINS_PER_REG;
- return pin_num * DLEVEL_PINS_BITS;
-}
-
-static inline u32 sunxi_pull_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += PULL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / PULL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_pull_offset(u16 pin)
-{
- u32 pin_num = pin % PULL_PINS_PER_REG;
- return pin_num * PULL_PINS_BITS;
-}
-
static inline u32 sunxi_irq_hw_bank_num(const struct sunxi_pinctrl_desc *desc, u8 bank)
{
if (!desc->irq_bank_map)
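
The replacement of the *_PINS_PER_REG/*_PINS_BITS/*_PINS_MASK triplets with single *_FIELD_WIDTH defines loses no information: with 32-bit registers, pins-per-register is simply 32 divided by the field width (mux 4 bits gives 8 pins, data 1 bit gives 32, drive level and pull 2 bits give 16), and the mask is (BIT(width) - 1) shifted into place. On the D1 layout the drive-level field widens to 4 bits (8 pins per register), the pull registers move to offset 0x24, and the per-bank stride grows from 0x24 to 0x30.
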
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 18fc6a08569e..b437847b6237 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-if X86
-source "drivers/platform/x86/Kconfig"
-endif
if MIPS
source "drivers/platform/mips/Kconfig"
endif
@@ -15,3 +12,5 @@ source "drivers/platform/mellanox/Kconfig"
source "drivers/platform/olpc/Kconfig"
source "drivers/platform/surface/Kconfig"
+
+source "drivers/platform/x86/Kconfig"
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 717299cbccac..c45fb376d653 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -139,7 +139,7 @@ config CROS_EC_PROTO
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
- depends on LEDS_CLASS && ACPI
+ depends on LEDS_CLASS && (ACPI || CROS_EC)
help
This option enables support for the keyboard backlight LEDs on
select Chrome OS systems.
@@ -267,4 +267,13 @@ config CHROMEOS_PRIVACY_SCREEN
source "drivers/platform/chrome/wilco_ec/Kconfig"
+# KUnit test cases
+config CROS_KUNIT
+ tristate "Kunit tests for ChromeOS" if !KUNIT_ALL_TESTS
+ depends on KUNIT && CROS_EC
+ default KUNIT_ALL_TESTS
+ select CROS_EC_PROTO
+ help
+ KUnit tests for ChromeOS platform code, currently covering the EC protocol helpers.
+
endif # CHROMEOS_PLATFORMS
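
With CONFIG_CROS_KUNIT enabled (or CONFIG_KUNIT_ALL_TESTS), the new suite runs like any other KUnit test, typically via ./tools/testing/kunit/kunit.py run with a .kunitconfig fragment enabling KUNIT, CROS_EC and CROS_KUNIT; note that this patch does not add such a fragment itself.
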
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 52f5a2dde8b8..f7e74a845afc 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -30,3 +30,8 @@ obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o
obj-$(CONFIG_WILCO_EC) += wilco_ec/
+
+# KUnit test cases
+obj-$(CONFIG_CROS_KUNIT) += cros_kunit.o
+cros_kunit-objs := cros_kunit_util.o
+cros_kunit-objs += cros_ec_proto_test.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index b3e94cdf7d1a..8aace50d446d 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -19,9 +19,6 @@
#include "cros_ec.h"
-#define CROS_EC_DEV_EC_INDEX 0
-#define CROS_EC_DEV_PD_INDEX 1
-
static struct cros_ec_platform ec_p = {
.ec_name = CROS_EC_DEV_NAME,
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_EC_INDEX),
@@ -135,16 +132,16 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
-
- /* For now, report failure to transition to S0ix with a warning. */
+ /* Report failure to transition to system-wide suspend with a warning. */
if (ret >= 0 && ec_dev->host_sleep_v1 &&
- (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME)) {
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME ||
+ sleep_event == HOST_SLEEP_EVENT_S3_RESUME)) {
ec_dev->last_resume_result =
buf.u.resp1.resume_response.sleep_transitions;
WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TIMEOUT,
- "EC detected sleep transition timeout. Total slp_s0 transitions: %d",
+ "EC detected sleep transition timeout. Total sleep transitions: %d",
buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
}
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index ff767dccdf0f..05d2e8765a66 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -52,8 +52,8 @@ static int cros_ec_map_error(uint32_t result)
return ret;
}
-static int prepare_packet(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+static int prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
{
struct ec_host_request *request;
u8 *out;
@@ -85,8 +85,29 @@ static int prepare_packet(struct cros_ec_device *ec_dev,
return sizeof(*request) + msg->outsize;
}
-static int send_command(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+static int prepare_tx_legacy(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ u8 *out;
+ u8 csum;
+ int i;
+
+ if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
+ return -EINVAL;
+
+ out = ec_dev->dout;
+ out[0] = EC_CMD_VERSION0 + msg->version;
+ out[1] = msg->command;
+ out[2] = msg->outsize;
+ csum = out[0] + out[1] + out[2];
+ for (i = 0; i < msg->outsize; i++)
+ csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
+ out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;
+
+ return EC_MSG_TX_PROTO_BYTES + msg->outsize;
+}
+
+static int cros_ec_xfer_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
int ret;
int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
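
prepare_tx_legacy() above reproduces the proto v2 wire format byte for byte: a version byte (EC_CMD_VERSION0 plus the command version), the command, the payload length, the payload itself, and a trailing 8-bit additive checksum over everything before it. A stand-alone sketch of the same framing (user-space, not part of the patch; the constants are the values from cros_ec_commands.h):

#include <stdio.h>

#define EC_CMD_VERSION0        0xdc  /* value from cros_ec_commands.h */
#define EC_CMD_HELLO           0x01
#define EC_MSG_TX_HEADER_BYTES 3

/* Build one proto v2 frame the way prepare_tx_legacy() does. */
static int build_legacy_frame(unsigned char *out, unsigned char version,
                              unsigned char cmd, const unsigned char *data,
                              unsigned char len)
{
        unsigned char csum;
        int i;

        out[0] = EC_CMD_VERSION0 + version;
        out[1] = cmd;
        out[2] = len;
        csum = out[0] + out[1] + out[2];
        for (i = 0; i < len; i++)
                csum += out[EC_MSG_TX_HEADER_BYTES + i] = data[i];
        out[EC_MSG_TX_HEADER_BYTES + len] = csum;

        return EC_MSG_TX_HEADER_BYTES + len + 1;
}

int main(void)
{
        unsigned char frame[8];
        const unsigned char hello[4] = { 0xd0, 0xc0, 0xb0, 0xa0 };
        int i, n;

        /* 0xa0b0c0d0 in little-endian byte order, as in the hello handshake. */
        n = build_legacy_frame(frame, 0, EC_CMD_HELLO, hello, sizeof(hello));
        for (i = 0; i < n; i++)
                printf("%02x ", frame[i]);
        printf("\n");  /* dc 01 04 d0 c0 b0 a0 c1 */
        return 0;
}
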
@@ -102,57 +123,68 @@ static int send_command(struct cros_ec_device *ec_dev,
* the EC is trying to use protocol v2, on an underlying
* communication mechanism that does not support v2.
*/
- dev_err_once(ec_dev->dev,
- "missing EC transfer API, cannot send command\n");
+ dev_err_once(ec_dev->dev, "missing EC transfer API, cannot send command\n");
return -EIO;
}
trace_cros_ec_request_start(msg);
ret = (*xfer_fxn)(ec_dev, msg);
trace_cros_ec_request_done(msg, ret);
- if (msg->result == EC_RES_IN_PROGRESS) {
- int i;
- struct cros_ec_command *status_msg;
- struct ec_response_get_comms_status *status;
- status_msg = kmalloc(sizeof(*status_msg) + sizeof(*status),
- GFP_KERNEL);
- if (!status_msg)
- return -ENOMEM;
+ return ret;
+}
- status_msg->version = 0;
- status_msg->command = EC_CMD_GET_COMMS_STATUS;
- status_msg->insize = sizeof(*status);
- status_msg->outsize = 0;
+static int cros_ec_wait_until_complete(struct cros_ec_device *ec_dev, uint32_t *result)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_comms_status status;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_comms_status *status = &buf.status;
+ int ret = 0, i;
- /*
- * Query the EC's status until it's no longer busy or
- * we encounter an error.
- */
- for (i = 0; i < EC_COMMAND_RETRIES; i++) {
- usleep_range(10000, 11000);
-
- trace_cros_ec_request_start(status_msg);
- ret = (*xfer_fxn)(ec_dev, status_msg);
- trace_cros_ec_request_done(status_msg, ret);
- if (ret == -EAGAIN)
- continue;
- if (ret < 0)
- break;
-
- msg->result = status_msg->result;
- if (status_msg->result != EC_RES_SUCCESS)
- break;
-
- status = (struct ec_response_get_comms_status *)
- status_msg->data;
- if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
- break;
+ msg->version = 0;
+ msg->command = EC_CMD_GET_COMMS_STATUS;
+ msg->insize = sizeof(*status);
+ msg->outsize = 0;
+
+ /* Query the EC's status until it's no longer busy or we encounter an error. */
+ for (i = 0; i < EC_COMMAND_RETRIES; ++i) {
+ usleep_range(10000, 11000);
+
+ ret = cros_ec_xfer_command(ec_dev, msg);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+
+ *result = msg->result;
+ if (msg->result != EC_RES_SUCCESS)
+ return ret;
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ break;
}
- kfree(status_msg);
+ if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
+ return ret;
}
+ if (i >= EC_COMMAND_RETRIES)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int cros_ec_send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ int ret = cros_ec_xfer_command(ec_dev, msg);
+
+ if (msg->result == EC_RES_IN_PROGRESS)
+ ret = cros_ec_wait_until_complete(ec_dev, &msg->result);
+
return ret;
}
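
The split above also bounds how long a busy EC can stall a caller: cros_ec_wait_until_complete() polls EC_CMD_GET_COMMS_STATUS at most EC_COMMAND_RETRIES times with a 10-11 ms sleep per attempt, and returns -EAGAIN if the EC is still processing after the last try. Because the final status is written back through *result into msg->result, cros_ec_check_result()'s EC_RES_IN_PROGRESS-to--EAGAIN mapping for callers stays intact.
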
@@ -161,35 +193,18 @@ static int send_command(struct cros_ec_device *ec_dev,
* @ec_dev: Device to register.
* @msg: Message to write.
*
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
+ * This is used by all ChromeOS EC drivers to prepare the outgoing message
+ * according to the protocol version negotiated for the device.
*
* Return: number of prepared bytes on success or negative error code.
*/
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
- u8 *out;
- u8 csum;
- int i;
-
if (ec_dev->proto_version > 2)
- return prepare_packet(ec_dev, msg);
-
- if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
- return -EINVAL;
+ return prepare_tx(ec_dev, msg);
- out = ec_dev->dout;
- out[0] = EC_CMD_VERSION0 + msg->version;
- out[1] = msg->command;
- out[2] = msg->outsize;
- csum = out[0] + out[1] + out[2];
- for (i = 0; i < msg->outsize; i++)
- csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
- out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;
-
- return EC_MSG_TX_PROTO_BYTES + msg->outsize;
+ return prepare_tx_legacy(ec_dev, msg);
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
@@ -199,9 +214,12 @@ EXPORT_SYMBOL(cros_ec_prepare_tx);
* @msg: Message to check.
*
* This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
+ * EC_RES_IN_PROGRESS and to warn about it.
*
- * Return: 0 on success or negative error code.
+ * The function should not check for any further error codes; doing so
+ * would break the ABI.
+ *
+ * Return: -EAGAIN if ec_msg->result == EC_RES_IN_PROGRESS. Otherwise, 0.
*/
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
@@ -228,59 +246,66 @@ EXPORT_SYMBOL(cros_ec_check_result);
*
* @ec_dev: EC device to call
* @msg: message structure to use
- * @mask: result when function returns >=0.
+ * @mask: result when function returns 0.
*
* LOCKING:
* the caller has ec_dev->lock mutex, or the caller knows there is
* no other command in progress.
*/
-static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg,
- uint32_t *mask)
+static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev, uint32_t *mask)
{
+ struct cros_ec_command *msg;
struct ec_response_host_event_mask *r;
- int ret;
+ int ret, mapped;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*r), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
msg->command = EC_CMD_HOST_EVENT_GET_WAKE_MASK;
- msg->version = 0;
- msg->outsize = 0;
msg->insize = sizeof(*r);
- ret = send_command(ec_dev, msg);
- if (ret >= 0) {
- if (msg->result == EC_RES_INVALID_COMMAND)
- return -EOPNOTSUPP;
- if (msg->result != EC_RES_SUCCESS)
- return -EPROTO;
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
}
- if (ret > 0) {
- r = (struct ec_response_host_event_mask *)msg->data;
- *mask = r->mask;
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
}
+ r = (struct ec_response_host_event_mask *)msg->data;
+ *mask = r->mask;
+ ret = 0;
+exit:
+ kfree(msg);
return ret;
}
-static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
- int devidx,
- struct cros_ec_command *msg)
+static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
{
- /*
- * Try using v3+ to query for supported protocols. If this
- * command fails, fall back to v2. Returns the highest protocol
- * supported by the EC.
- * Also sets the max request/response/passthru size.
- */
- int ret;
+ struct cros_ec_command *msg;
+ struct ec_response_get_protocol_info *info;
+ int ret, mapped;
- if (!ec_dev->pkt_xfer)
- return -EPROTONOSUPPORT;
+ ec_dev->proto_version = 3;
+ if (devidx > 0)
+ ec_dev->max_passthru = 0;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*info), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
- memset(msg, 0, sizeof(*msg));
msg->command = EC_CMD_PASSTHRU_OFFSET(devidx) | EC_CMD_GET_PROTOCOL_INFO;
- msg->insize = sizeof(struct ec_response_get_protocol_info);
+ msg->insize = sizeof(*info);
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
/*
* Send command once again when timeout occurred.
* Fingerprint MCU (FPMCU) is restarted during system boot which
@@ -289,68 +314,115 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
* attempt because we waited at least EC_MSG_DEADLINE_MS.
*/
if (ret == -ETIMEDOUT)
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
"failed to check for EC[%d] protocol version: %d\n",
devidx, ret);
- return ret;
+ goto exit;
}
- if (devidx > 0 && msg->result == EC_RES_INVALID_COMMAND)
- return -ENODEV;
- else if (msg->result != EC_RES_SUCCESS)
- return msg->result;
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
+ }
- return 0;
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ info = (struct ec_response_get_protocol_info *)msg->data;
+
+ switch (devidx) {
+ case CROS_EC_DEV_EC_INDEX:
+ ec_dev->max_request = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+ ec_dev->max_response = info->max_response_packet_size -
+ sizeof(struct ec_host_response);
+ ec_dev->proto_version = min(EC_HOST_REQUEST_VERSION,
+ fls(info->protocol_versions) - 1);
+ ec_dev->din_size = info->max_response_packet_size + EC_MAX_RESPONSE_OVERHEAD;
+ ec_dev->dout_size = info->max_request_packet_size + EC_MAX_REQUEST_OVERHEAD;
+
+ dev_dbg(ec_dev->dev, "using proto v%u\n", ec_dev->proto_version);
+ break;
+ case CROS_EC_DEV_PD_INDEX:
+ ec_dev->max_passthru = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+
+ dev_dbg(ec_dev->dev, "found PD chip\n");
+ break;
+ default:
+ dev_dbg(ec_dev->dev, "unknown passthru index: %d\n", devidx);
+ break;
+ }
+
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
}
-static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev)
+static int cros_ec_get_proto_info_legacy(struct cros_ec_device *ec_dev)
{
struct cros_ec_command *msg;
- struct ec_params_hello *hello_params;
- struct ec_response_hello *hello_response;
- int ret;
- int len = max(sizeof(*hello_params), sizeof(*hello_response));
+ struct ec_params_hello *params;
+ struct ec_response_hello *response;
+ int ret, mapped;
+
+ ec_dev->proto_version = 2;
- msg = kmalloc(sizeof(*msg) + len, GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
- msg->version = 0;
msg->command = EC_CMD_HELLO;
- hello_params = (struct ec_params_hello *)msg->data;
- msg->outsize = sizeof(*hello_params);
- hello_response = (struct ec_response_hello *)msg->data;
- msg->insize = sizeof(*hello_response);
-
- hello_params->in_data = 0xa0b0c0d0;
+ msg->insize = sizeof(*response);
+ msg->outsize = sizeof(*params);
- ret = send_command(ec_dev, msg);
+ params = (struct ec_params_hello *)msg->data;
+ params->in_data = 0xa0b0c0d0;
+ ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
- dev_dbg(ec_dev->dev,
- "EC failed to respond to v2 hello: %d\n",
- ret);
+ dev_dbg(ec_dev->dev, "EC failed to respond to v2 hello: %d\n", ret);
goto exit;
- } else if (msg->result != EC_RES_SUCCESS) {
- dev_err(ec_dev->dev,
- "EC responded to v2 hello with error: %d\n",
- msg->result);
- ret = msg->result;
+ }
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ dev_err(ec_dev->dev, "EC responded to v2 hello with error: %d\n", msg->result);
+ goto exit;
+ }
+
+ if (ret == 0) {
+ ret = -EPROTO;
goto exit;
- } else if (hello_response->out_data != 0xa1b2c3d4) {
+ }
+
+ response = (struct ec_response_hello *)msg->data;
+ if (response->out_data != 0xa1b2c3d4) {
dev_err(ec_dev->dev,
"EC responded to v2 hello with bad result: %u\n",
- hello_response->out_data);
+ response->out_data);
ret = -EBADMSG;
goto exit;
}
- ret = 0;
+ ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_passthru = 0;
+ ec_dev->pkt_xfer = NULL;
+ ec_dev->din_size = EC_PROTO2_MSG_BYTES;
+ ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
- exit:
+ dev_dbg(ec_dev->dev, "falling back to proto v2\n");
+ ret = 0;
+exit:
kfree(msg);
return ret;
}
@@ -371,13 +443,12 @@ static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev)
* the caller has ec_dev->lock mutex or the caller knows there is
* no other command in progress.
*/
-static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
- u16 cmd, u32 *mask)
+static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, u16 cmd, u32 *mask)
{
struct ec_params_get_cmd_versions *pver;
struct ec_response_get_cmd_versions *rver;
struct cros_ec_command *msg;
- int ret;
+ int ret, mapped;
msg = kmalloc(sizeof(*msg) + max(sizeof(*rver), sizeof(*pver)),
GFP_KERNEL);
@@ -392,14 +463,26 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
pver = (struct ec_params_get_cmd_versions *)msg->data;
pver->cmd = cmd;
- ret = send_command(ec_dev, msg);
- if (ret > 0) {
- rver = (struct ec_response_get_cmd_versions *)msg->data;
- *mask = rver->version_mask;
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
}
- kfree(msg);
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+ rver = (struct ec_response_get_cmd_versions *)msg->data;
+ *mask = rver->version_mask;
+ ret = 0;
+exit:
+ kfree(msg);
return ret;
}
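
Callers turn the returned version_mask into a capability level with fls(): for MKBP events, mkbp_event_supported stores the highest supported command version plus one (zero meaning unsupported), which is why the tests below expect 7 for a mask of BIT(6) | BIT(5). A tiny stand-alone illustration (user-space; fls_demo() mimics the kernel's fls()):

#include <stdio.h>

/* Stand-in for the kernel's fls(): position of the highest set bit,
 * counting from 1, or 0 if no bit is set. */
static int fls_demo(unsigned int v)
{
        int n = 0;

        while (v) {
                n++;
                v >>= 1;
        }
        return n;
}

int main(void)
{
        /* BIT(6) | BIT(5): versions 5 and 6 supported -> fls() == 7. */
        printf("%d\n", fls_demo((1U << 6) | (1U << 5)));
        return 0;
}
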
@@ -413,71 +496,17 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
- struct cros_ec_command *proto_msg;
- struct ec_response_get_protocol_info *proto_info;
- u32 ver_mask = 0;
+ u32 ver_mask;
int ret;
- proto_msg = kzalloc(sizeof(*proto_msg) + sizeof(*proto_info),
- GFP_KERNEL);
- if (!proto_msg)
- return -ENOMEM;
-
/* First try sending with proto v3. */
- ec_dev->proto_version = 3;
- ret = cros_ec_host_command_proto_query(ec_dev, 0, proto_msg);
-
- if (ret == 0) {
- proto_info = (struct ec_response_get_protocol_info *)
- proto_msg->data;
- ec_dev->max_request = proto_info->max_request_packet_size -
- sizeof(struct ec_host_request);
- ec_dev->max_response = proto_info->max_response_packet_size -
- sizeof(struct ec_host_response);
- ec_dev->proto_version =
- min(EC_HOST_REQUEST_VERSION,
- fls(proto_info->protocol_versions) - 1);
- dev_dbg(ec_dev->dev,
- "using proto v%u\n",
- ec_dev->proto_version);
-
- ec_dev->din_size = ec_dev->max_response +
- sizeof(struct ec_host_response) +
- EC_MAX_RESPONSE_OVERHEAD;
- ec_dev->dout_size = ec_dev->max_request +
- sizeof(struct ec_host_request) +
- EC_MAX_REQUEST_OVERHEAD;
-
- /*
- * Check for PD
- */
- ret = cros_ec_host_command_proto_query(ec_dev, 1, proto_msg);
-
- if (ret) {
- dev_dbg(ec_dev->dev, "no PD chip found: %d\n", ret);
- ec_dev->max_passthru = 0;
- } else {
- dev_dbg(ec_dev->dev, "found PD chip\n");
- ec_dev->max_passthru =
- proto_info->max_request_packet_size -
- sizeof(struct ec_host_request);
- }
+ if (!cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_EC_INDEX)) {
+ /* Check for PD. */
+ cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_PD_INDEX);
} else {
/* Try querying with a v2 hello message. */
- ec_dev->proto_version = 2;
- ret = cros_ec_host_command_proto_query_v2(ec_dev);
-
- if (ret == 0) {
- /* V2 hello succeeded. */
- dev_dbg(ec_dev->dev, "falling back to proto v2\n");
-
- ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
- ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
- ec_dev->max_passthru = 0;
- ec_dev->pkt_xfer = NULL;
- ec_dev->din_size = EC_PROTO2_MSG_BYTES;
- ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
- } else {
+ ret = cros_ec_get_proto_info_legacy(ec_dev);
+ if (ret) {
/*
* It's possible for a test to occur too early when
* the EC isn't listening. If this happens, we'll
@@ -485,7 +514,7 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
*/
ec_dev->proto_version = EC_PROTO_VERSION_UNKNOWN;
dev_dbg(ec_dev->dev, "EC query failed: %d\n", ret);
- goto exit;
+ return ret;
}
}
@@ -506,26 +535,21 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
}
/* Probe if MKBP event is supported */
- ret = cros_ec_get_host_command_version_mask(ec_dev,
- EC_CMD_GET_NEXT_EVENT,
- &ver_mask);
- if (ret < 0 || ver_mask == 0)
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_GET_NEXT_EVENT, &ver_mask);
+ if (ret < 0 || ver_mask == 0) {
ec_dev->mkbp_event_supported = 0;
- else
+ } else {
ec_dev->mkbp_event_supported = fls(ver_mask);
- dev_dbg(ec_dev->dev, "MKBP support version %u\n",
- ec_dev->mkbp_event_supported - 1);
+ dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
+ }
/* Probe if host sleep v1 is supported for S0ix failure detection. */
- ret = cros_ec_get_host_command_version_mask(ec_dev,
- EC_CMD_HOST_SLEEP_EVENT,
- &ver_mask);
- ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_HOST_SLEEP_EVENT, &ver_mask);
+ ec_dev->host_sleep_v1 = (ret == 0 && (ver_mask & EC_VER_MASK(1)));
/* Get host event wake mask. */
- ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg,
- &ec_dev->host_event_wake_mask);
+ ret = cros_ec_get_host_event_wake_mask(ec_dev, &ec_dev->host_event_wake_mask);
if (ret < 0) {
/*
* If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
@@ -556,7 +580,6 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
ret = 0;
exit:
- kfree(proto_msg);
return ret;
}
EXPORT_SYMBOL(cros_ec_query_all);
@@ -601,7 +624,7 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
msg->insize = ec_dev->max_response;
}
- if (msg->command < EC_CMD_PASSTHRU_OFFSET(1)) {
+ if (msg->command < EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX)) {
if (msg->outsize > ec_dev->max_request) {
dev_err(ec_dev->dev,
"request of size %u is too big (max: %u)\n",
@@ -621,7 +644,7 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
}
}
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
mutex_unlock(&ec_dev->lock);
return ret;
@@ -852,8 +875,8 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature)
if (features->flags[0] == -1U && features->flags[1] == -1U) {
/* features bitmap not read yet */
- ret = cros_ec_command(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
- NULL, 0, features, sizeof(*features));
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
+ NULL, 0, features, sizeof(*features));
if (ret < 0) {
dev_warn(ec->dev, "cannot get EC features: %d\n", ret);
memset(features, 0, sizeof(*features));
@@ -934,7 +957,7 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
/**
- * cros_ec_command - Send a command to the EC.
+ * cros_ec_cmd - Send a command to the EC.
*
* @ec_dev: EC device
* @version: EC command version
@@ -946,13 +969,13 @@ EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
*
* Return: >= 0 on success, negative error number on failure.
*/
-int cros_ec_command(struct cros_ec_device *ec_dev,
- unsigned int version,
- int command,
- void *outdata,
- int outsize,
- void *indata,
- int insize)
+int cros_ec_cmd(struct cros_ec_device *ec_dev,
+ unsigned int version,
+ int command,
+ void *outdata,
+ size_t outsize,
+ void *indata,
+ size_t insize)
{
struct cros_ec_command *msg;
int ret;
@@ -979,4 +1002,4 @@ error:
kfree(msg);
return ret;
}
-EXPORT_SYMBOL_GPL(cros_ec_command);
+EXPORT_SYMBOL_GPL(cros_ec_cmd);
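
For reference, a hypothetical caller of the renamed helper follows the same shape as the cros_ec_check_features() hunk above; sketched in isolation (illustrative only, though EC_CMD_GET_FEATURES and struct ec_response_get_features are existing definitions in cros_ec_commands.h):

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

/* Hypothetical caller of the renamed helper. */
static int example_get_features(struct cros_ec_device *ec_dev,
                                struct ec_response_get_features *features)
{
        /* version 0, no request payload, response copied into *features */
        return cros_ec_cmd(ec_dev, 0, EC_CMD_GET_FEATURES,
                           NULL, 0, features, sizeof(*features));
}
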
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
new file mode 100644
index 000000000000..c6a83df91ae1
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for the ChromeOS Embedded Controller protocol.
+ */
+
+#include <kunit/test.h>
+
+#include <asm-generic/unaligned.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+#define BUFSIZE 512
+
+struct cros_ec_proto_test_priv {
+ struct cros_ec_device ec_dev;
+ u8 dout[BUFSIZE];
+ u8 din[BUFSIZE];
+ struct cros_ec_command *msg;
+ u8 _msg[BUFSIZE];
+};
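+
+/*
+ * The cros_kunit_util helpers the tests rely on are added elsewhere in
+ * this series; the contract summarized here is inferred purely from the
+ * call sites below:
+ *
+ *   mock = cros_kunit_ec_xfer_mock_add(test, outsize);
+ *	queue one successful transfer whose response payload
+ *	(mock->o_data, outsize bytes) the test fills in beforehand;
+ *
+ *   mock = cros_kunit_ec_xfer_mock_addx(test, ret, result, outsize);
+ *	same, but with an explicit xfer return value and EC result code
+ *	(e.g. -EIO, or EC_RES_INVALID_COMMAND);
+ *
+ *   mock = cros_kunit_ec_xfer_mock_next();
+ *	after the code under test has run, pop the consumed entry to
+ *	verify the request (mock->msg and the mock->i_data payload).
+ */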
+
+static void cros_ec_proto_test_prepare_tx_legacy_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ u8 csum;
+
+ ec_dev->proto_version = 2;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, EC_MSG_TX_PROTO_BYTES + EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[0], EC_CMD_VERSION0);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[1], EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[2], EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, EC_MSG_TX_HEADER_BYTES, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 3], 0xef);
+ for (i = 4; i < EC_PROTO2_MAX_PARAM_SIZE; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + i], 0);
+
+ csum = EC_CMD_VERSION0;
+ csum += EC_CMD_HELLO;
+ csum += EC_PROTO2_MAX_PARAM_SIZE;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test,
+ ec_dev->dout[EC_MSG_TX_HEADER_BYTES + EC_PROTO2_MAX_PARAM_SIZE],
+ csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ ec_dev->proto_version = 2;
+
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_prepare_tx_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ struct ec_host_request *request = (struct ec_host_request *)ec_dev->dout;
+ int ret, i;
+ u8 csum;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = 0x88;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, sizeof(*request) + 0x88);
+
+ KUNIT_EXPECT_EQ(test, request->struct_version, EC_HOST_REQUEST_VERSION);
+ KUNIT_EXPECT_EQ(test, request->command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, request->command_version, 0);
+ KUNIT_EXPECT_EQ(test, request->data_len, 0x88);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 3], 0xef);
+ for (i = 4; i < 0x88; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + i], 0);
+
+ csum = EC_HOST_REQUEST_VERSION;
+ csum += EC_CMD_HELLO;
+ csum += 0x88;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test, request->checksum, (u8)-csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ msg->outsize = ec_dev->dout_size - sizeof(struct ec_host_request) + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_check_result(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ static enum ec_status status[] = {
+ EC_RES_SUCCESS,
+ EC_RES_INVALID_COMMAND,
+ EC_RES_ERROR,
+ EC_RES_INVALID_PARAM,
+ EC_RES_ACCESS_DENIED,
+ EC_RES_INVALID_RESPONSE,
+ EC_RES_INVALID_VERSION,
+ EC_RES_INVALID_CHECKSUM,
+ EC_RES_UNAVAILABLE,
+ EC_RES_TIMEOUT,
+ EC_RES_OVERFLOW,
+ EC_RES_INVALID_HEADER,
+ EC_RES_REQUEST_TRUNCATED,
+ EC_RES_RESPONSE_TOO_BIG,
+ EC_RES_BUS_ERROR,
+ EC_RES_BUSY,
+ EC_RES_INVALID_HEADER_VERSION,
+ EC_RES_INVALID_HEADER_CRC,
+ EC_RES_INVALID_DATA_CRC,
+ EC_RES_DUP_UNAVAILABLE,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(status); ++i) {
+ msg->result = status[i];
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ }
+
+ msg->result = EC_RES_IN_PROGRESS;
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+}
+
+static void cros_ec_proto_test_query_all_pretest(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ /*
+ * cros_ec_query_all() frees din and dout via devm_kfree() and reallocates them with
+ * devm_kzalloc() to match the sizes it discovers. Set them to NULL as they aren't managed by
+ * ec_dev->dev but allocated statically in struct cros_ec_proto_test_priv
+ * (see cros_ec_proto_test_init()).
+ */
+ ec_dev->din = NULL;
+ ec_dev->dout = NULL;
+}
+
+static void cros_ec_proto_test_query_all_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->protocol_versions = BIT(3) | BIT(2);
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbf;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(6) | BIT(5);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ struct ec_response_host_event_mask *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_host_event_mask *)mock->o_data;
+ data->mask = 0xbeef;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, 0xbe - sizeof(struct ec_host_request));
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, 0xef - sizeof(struct ec_host_response));
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, 0xef + EC_MAX_RESPONSE_OVERHEAD);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, 0xbe + EC_MAX_REQUEST_OVERHEAD);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0xbf - sizeof(struct ec_host_request));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 7);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_HOST_SLEEP_EVENT);
+
+ KUNIT_EXPECT_TRUE(test, ec_dev->host_sleep_v1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->host_event_wake_mask, 0xbeef);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check these values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check these values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_data_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xbeefbfbf;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EBADMSG);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_mkbp(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check these values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_mkbp_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check the values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
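+ /*
+ * Scenario: the MKBP version query succeeds but returns no data. This
+ * is also treated as "no MKBP support"; mkbp_event_supported stays 0.
+ */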
+static void cros_ec_proto_test_query_all_no_mkbp_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check the values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
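+ /*
+ * Scenario: the host sleep version query reports a zero version mask,
+ * so cros_ec_query_all() must clear host_sleep_v1.
+ */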
+static void cros_ec_proto_test_query_all_no_host_sleep(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check the values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
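+ /*
+ * Scenario: the host sleep version query returns no data. The preceding
+ * MKBP response deliberately pollutes the shared buffer with 0xbeef to
+ * prove that the stale version mask is not reused.
+ */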
+static void cros_ec_proto_test_query_all_no_host_sleep_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check the values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /* Pollute the next cros_ec_get_host_command_version_mask() on purpose. */
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0xbeef;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
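+ /*
+ * Scenario: EC_CMD_HOST_EVENT_GET_WAKE_MASK fails, so a default wake
+ * mask that excludes lid, AC, battery and PD-MCU events must be used.
+ */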
+static void cros_ec_proto_test_query_all_default_wake_mask_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check the values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
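+ /*
+ * Scenario: EC_CMD_HOST_EVENT_GET_WAKE_MASK returns no data; the same
+ * default wake mask must be used as in the error case above.
+ */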
+static void cros_ec_proto_test_query_all_default_wake_mask_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check the values, provide valid sizes so
+ * that cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 2;
+ buf.data[0] = 0x55;
+ buf.data[1] = 0xaa;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 4);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x55;
+ data[2] = 0xcc;
+ data[3] = 0x33;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 4);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 4);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0x55);
+ KUNIT_EXPECT_EQ(test, data[1], 0xaa);
+
+ KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa);
+ KUNIT_EXPECT_EQ(test, buf.data[1], 0x55);
+ KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc);
+ KUNIT_EXPECT_EQ(test, buf.data[3], 0x33);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 0xee + 1;
+ buf.msg.outsize = 2;
+
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0xcc);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 0xcc);
+
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 0xee);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+ }
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xff + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xdd + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 0);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 1);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = NULL;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 1);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 0);
+}
+
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = NULL;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
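+ /*
+ * The following tests exercise the EC_RES_IN_PROGRESS path: when a
+ * command reports EC_RES_IN_PROGRESS, cros_ec_send_command() polls
+ * EC_CMD_GET_COMMS_STATUS until the EC_COMMS_STATUS_PROCESSING flag
+ * clears or the retry budget is exhausted.
+ */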
+static void cros_ec_proto_test_cmd_xfer_in_progress_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ struct ec_response_get_comms_status *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags = 0;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
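+ /* The return value is the size of the final comms status response. */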
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_comms_status));
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_SUCCESS);
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_COMMS_STATUS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_comms_status));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* Make EC_CMD_GET_COMMS_STATUS return -EAGAIN for all EC_COMMAND_RETRIES attempts. */
+ cros_kunit_ec_xfer_mock_default_ret = -EAGAIN;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* 1 initial command + EC_COMMAND_RETRIES (50) of EC_CMD_GET_COMMS_STATUS. */
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ {
+ struct ec_response_get_comms_status *data;
+ int i;
+
+ for (i = 0; i < 50; ++i) {
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags |= EC_COMMS_STATUS_PROCESSING;
+ }
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* 1 initial command + EC_COMMAND_RETRIES (50) of EC_CMD_GET_COMMS_STATUS. */
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_INVALID_COMMAND);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_in_progress_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_command msg;
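+ /* Expected cros_ec_map_error() mapping of EC result codes to errno. */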
+ static const int map[] = {
+ [EC_RES_SUCCESS] = 0,
+ [EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
+ [EC_RES_ERROR] = -EIO,
+ [EC_RES_INVALID_PARAM] = -EINVAL,
+ [EC_RES_ACCESS_DENIED] = -EACCES,
+ [EC_RES_INVALID_RESPONSE] = -EPROTO,
+ [EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
+ [EC_RES_INVALID_CHECKSUM] = -EBADMSG,
+ /*
+ * EC_RES_IN_PROGRESS is special because cros_ec_send_command() has
+ * extra logic to handle it. Note that the default
+ * cros_kunit_ec_xfer_mock_default_ret == 0, so cros_ec_xfer_command()
+ * in cros_ec_wait_until_complete() returns 0. As a result, the call
+ * returns -EPROTO without going through cros_ec_map_error().
+ */
+ [EC_RES_IN_PROGRESS] = -EPROTO,
+ [EC_RES_UNAVAILABLE] = -ENODATA,
+ [EC_RES_TIMEOUT] = -ETIMEDOUT,
+ [EC_RES_OVERFLOW] = -EOVERFLOW,
+ [EC_RES_INVALID_HEADER] = -EBADR,
+ [EC_RES_REQUEST_TRUNCATED] = -EBADR,
+ [EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
+ [EC_RES_BUS_ERROR] = -EFAULT,
+ [EC_RES_BUSY] = -EBUSY,
+ [EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
+ [EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
+ [EC_RES_INVALID_DATA_CRC] = -EBADMSG,
+ [EC_RES_DUP_UNAVAILABLE] = -ENODATA,
+ };
+
+ memset(&msg, 0, sizeof(msg));
+
+ for (i = 0; i < ARRAY_SIZE(map); ++i) {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, i, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, map[i]);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 0;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_keyboard_state_event(). */
+ {
+ union ec_response_get_next_data_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (union ec_response_get_next_data_v1 *)mock->o_data;
+ data->host_event = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_keyboard_state_event(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->suspended = true;
+
+ ret = cros_ec_get_next_event(ec_dev, NULL, NULL);
+ KUNIT_EXPECT_EQ(test, ret, -EHOSTDOWN);
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_version0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 1;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+ more_events = false;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_SENSOR_FIFO | EC_MKBP_HAS_MORE_EVENTS;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_SENSOR_FIFO);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
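+ /* A sensor FIFO event must not be reported as a wake event. */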
+ KUNIT_EXPECT_FALSE(test, wake_event);
+ KUNIT_EXPECT_TRUE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_next_event));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_version2(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
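+ /* mkbp_event_supported holds (highest supported version + 1); 3 selects v2. */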
+ ec_dev->mkbp_event_supported = 3;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_FINGERPRINT;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_FINGERPRINT);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC), &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
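+ /* An RTC host event never counts as a wake event, even when unmasked. */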
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX & ~EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED);
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED),
+ &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
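+ /* A host event excluded from host_event_wake_mask must not wake. */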
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_host_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 0;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_not_host_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_FINGERPRINT;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_wrong_event_size(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = 0xff;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = sizeof(ec_dev->event_data.data.host_event);
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC),
+ &ec_dev->event_data.data.host_event);
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC));
+}
+
+static void cros_ec_proto_test_check_features_cached(struct kunit *test)
+{
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec.features.flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ ec.features.flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+}
+
+static void cros_ec_proto_test_check_features_not_cached(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+ ec.features.flags[0] = -1;
+ ec.features.flags[1] = -1;
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ struct ec_response_get_features *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_features *)mock->o_data;
+ data->flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ data->flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+ }
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_FEATURES);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_features));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_response_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_motion_sense *)mock->o_data;
+ data->dump.sensor_count = 0xbf;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, 0xbf);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_legacy(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
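+ /* Legacy path: a set EC_MEMMAP_ACC_STATUS presence bit means 2 sensors. */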
+ struct {
+ u8 readmem_data;
+ int expected_result;
+ } test_data[] = {
+ { 0, 0 },
+ { EC_MEMMAP_ACC_STATUS_PRESENCE_BIT, 2 },
+ };
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->cmd_readmem = cros_kunit_readmem_mock;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(test_data); ++i) {
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For readmem. */
+ {
+ cros_kunit_readmem_mock_data = kunit_kzalloc(test, 1, GFP_KERNEL);
+ KUNIT_ASSERT_PTR_NE(test, cros_kunit_readmem_mock_data, NULL);
+ cros_kunit_readmem_mock_data[0] = test_data[i].readmem_data;
+
+ cros_kunit_ec_xfer_mock_default_ret = 1;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, test_data[i].expected_result);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+
+ /* For readmem. */
+ {
+ KUNIT_EXPECT_EQ(test, cros_kunit_readmem_mock_offset, EC_MEMMAP_ACC_STATUS);
+ }
+ }
+}
+
+static void cros_ec_proto_test_ec_cmd(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ u8 out[3], in[2];
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+
+ out[0] = 0xdd;
+ out[1] = 0xcc;
+ out[2] = 0xbb;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 2);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x99;
+ }
+
+ ret = cros_ec_cmd(ec_dev, 0x88, 0x77, out, ARRAY_SIZE(out), in, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, ret, 2);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0x88);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, 0x77);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, ARRAY_SIZE(out));
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0xdd);
+ KUNIT_EXPECT_EQ(test, data[1], 0xcc);
+ KUNIT_EXPECT_EQ(test, data[2], 0xbb);
+ }
+}
+
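+ /* Empty release callback: the test device memory is owned by KUnit. */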
+static void cros_ec_proto_test_release(struct device *dev)
+{
+}
+
+static int cros_ec_proto_test_init(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv;
+ struct cros_ec_device *ec_dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ test->priv = priv;
+
+ ec_dev = &priv->ec_dev;
+ ec_dev->dout = (u8 *)priv->dout;
+ ec_dev->dout_size = ARRAY_SIZE(priv->dout);
+ ec_dev->din = (u8 *)priv->din;
+ ec_dev->din_size = ARRAY_SIZE(priv->din);
+ ec_dev->proto_version = EC_HOST_REQUEST_VERSION;
+ ec_dev->dev = kunit_kzalloc(test, sizeof(*ec_dev->dev), GFP_KERNEL);
+ if (!ec_dev->dev)
+ return -ENOMEM;
+ device_initialize(ec_dev->dev);
+ dev_set_name(ec_dev->dev, "cros_ec_proto_test");
+ ec_dev->dev->release = cros_ec_proto_test_release;
+ ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
+
+ priv->msg = (struct cros_ec_command *)priv->_msg;
+
+ cros_kunit_mock_reset();
+
+ return 0;
+}
+
+static void cros_ec_proto_test_exit(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ put_device(ec_dev->dev);
+}
+
+static struct kunit_case cros_ec_proto_test_cases[] = {
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_check_result),
+ KUNIT_CASE(cros_ec_proto_test_query_all_normal),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_data_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_insize),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_return_error),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version0),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version2),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_not_host_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_wrong_event_size),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_normal),
+ KUNIT_CASE(cros_ec_proto_test_check_features_cached),
+ KUNIT_CASE(cros_ec_proto_test_check_features_not_cached),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_normal),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_legacy),
+ KUNIT_CASE(cros_ec_proto_test_ec_cmd),
+ {}
+};
+
+static struct kunit_suite cros_ec_proto_test_suite = {
+ .name = "cros_ec_proto_test",
+ .init = cros_ec_proto_test_init,
+ .exit = cros_ec_proto_test_exit,
+ .test_cases = cros_ec_proto_test_cases,
+};
+
+kunit_test_suite(cros_ec_proto_test_suite);
+
+MODULE_LICENSE("GPL");
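For orientation: each KUNIT_CASE() entry above names a static void function taking a struct kunit *, and the terminating {} sentinel ends the table. A minimal, hypothetical case following the same pattern (the name is invented for illustration):

    static void cros_ec_proto_test_example(struct kunit *test)
    {
            struct cros_ec_proto_test_priv *priv = test->priv;

            /* priv was populated by cros_ec_proto_test_init() above. */
            KUNIT_EXPECT_PTR_NE(test, priv->msg, NULL);
    }
    /* Added to the table as: KUNIT_CASE(cros_ec_proto_test_example), */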
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index 9bb5cd2c98b8..d7e407de88df 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -30,8 +30,8 @@ TRACE_EVENT(cros_ec_request_start,
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
- __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
),
@@ -55,8 +55,8 @@ TRACE_EVENT(cros_ec_request_done,
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
- __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
__entry->result = cmd->result;
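The div/mod pair splits a passthru command number into a device index and the raw command. Assuming the mainline definitions, where EC_CMD_PASSTHRU_OFFSET(n) expands to 0x4000 * (n) and CROS_EC_DEV_PD_INDEX is 1, the decode works out as in this illustrative sketch:

    /* Illustrative only; values assume the mainline cros_ec_commands.h. */
    unsigned int cmd = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + 0x67;
    unsigned int offset = cmd / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);  /* 1: PD device  */
    unsigned int command = cmd % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX); /* 0x67: raw cmd */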
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 7cb2e35c4ded..de6ee0f926a6 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -25,6 +25,8 @@
#define DRV_NAME "cros-ec-typec"
+#define DP_PORT_VDO (BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D) | DP_CAP_DFP_D)
+
/* Supported alt modes. */
enum {
CROS_EC_ALTMODE_DP = 0,
@@ -60,8 +62,7 @@ struct cros_typec_port {
uint8_t mux_flags;
uint8_t role;
- /* Port alt modes. */
- struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
+ struct typec_altmode *port_altmode[CROS_EC_ALTMODE_MAX];
/* Flag indicating that PD partner discovery data parsing is completed. */
bool sop_disc_done;
@@ -254,6 +255,14 @@ static void cros_typec_remove_cable(struct cros_typec_data *typec,
port->sop_prime_disc_done = false;
}
+static void cros_typec_unregister_port_altmodes(struct cros_typec_port *port)
+{
+ int i;
+
+ for (i = 0; i < CROS_EC_ALTMODE_MAX; i++)
+ typec_unregister_altmode(port->port_altmode[i]);
+}
+
static void cros_unregister_ports(struct cros_typec_data *typec)
{
int i;
@@ -268,34 +277,49 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
usb_role_switch_put(typec->ports[i]->role_sw);
typec_switch_put(typec->ports[i]->ori_sw);
typec_mux_put(typec->ports[i]->mux);
+ cros_typec_unregister_port_altmodes(typec->ports[i]);
typec_unregister_port(typec->ports[i]->port);
}
}
/*
- * Fake the alt mode structs until we actually start registering Type C port
- * and partner alt modes.
+ * Register port alt modes with known values until we start retrieving
+ * port capabilities from the EC.
*/
-static void cros_typec_register_port_altmodes(struct cros_typec_data *typec,
+static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
int port_num)
{
struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_altmode_desc desc = {};
+ struct typec_altmode *amode;
/* All PD capable CrOS devices are assumed to support DP altmode. */
- port->p_altmode[CROS_EC_ALTMODE_DP].svid = USB_TYPEC_DP_SID;
- port->p_altmode[CROS_EC_ALTMODE_DP].mode = USB_TYPEC_DP_MODE;
+ desc.svid = USB_TYPEC_DP_SID;
+ desc.mode = USB_TYPEC_DP_MODE;
+ desc.vdo = DP_PORT_VDO;
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
/*
* Register TBT compatibility alt mode. The EC will not enter the mode
* if it doesn't support it, so it's safe to register it unconditionally
* here for now.
*/
- port->p_altmode[CROS_EC_ALTMODE_TBT].svid = USB_TYPEC_TBT_SID;
- port->p_altmode[CROS_EC_ALTMODE_TBT].mode = TYPEC_ANY_MODE;
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = USB_TYPEC_TBT_SID;
+ desc.mode = TYPEC_ANY_MODE;
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
port->state.data = NULL;
+
+ return 0;
}
static int cros_typec_init_ports(struct cros_typec_data *typec)
@@ -352,8 +376,8 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
cros_port->port = typec_register_port(dev, cap);
if (IS_ERR(cros_port->port)) {
- dev_err(dev, "Failed to register port %d\n", port_num);
ret = PTR_ERR(cros_port->port);
+ dev_err_probe(dev, ret, "Failed to register port %d\n", port_num);
goto unregister_ports;
}
@@ -362,7 +386,11 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
dev_dbg(dev, "No switch control for port %d\n",
port_num);
- cros_typec_register_port_altmodes(typec, port_num);
+ ret = cros_typec_register_port_altmodes(typec, port_num);
+ if (ret) {
+ dev_err(dev, "Failed to register port altmodes\n");
+ goto unregister_ports;
+ }
cros_port->disc_data = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
if (!cros_port->disc_data) {
@@ -431,7 +459,7 @@ static int cros_typec_enable_tbt(struct cros_typec_data *typec,
data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
if (!port->state.alt) {
- port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_TBT];
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_TBT];
ret = cros_typec_usb_safe_state(port);
if (ret)
return ret;
@@ -473,7 +501,7 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
/* Configuration VDO. */
dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
if (!port->state.alt) {
- port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_DP];
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_DP];
ret = cros_typec_usb_safe_state(port);
if (ret)
return ret;
@@ -525,8 +553,8 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
enum typec_orientation orientation;
int ret;
- ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
- &req, sizeof(req), &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
+ &req, sizeof(req), &resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
port_num, ret);
@@ -585,8 +613,8 @@ mux_ack:
/* Sending Acknowledgment to EC */
mux_ack.port = port_num;
- if (cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
- sizeof(mux_ack), NULL, 0) < 0)
+ if (cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
+ sizeof(mux_ack), NULL, 0) < 0)
dev_warn(typec->dev,
"Failed to send Mux ACK to EC for port: %d\n",
port_num);
@@ -754,8 +782,8 @@ static int cros_typec_handle_sop_prime_disc(struct cros_typec_data *typec, int p
int ret = 0;
memset(disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
- disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ disc, EC_PROTO2_MAX_RESPONSE_SIZE);
if (ret < 0) {
dev_err(typec->dev, "Failed to get SOP' discovery data for port: %d\n", port_num);
goto sop_prime_disc_exit;
@@ -837,8 +865,8 @@ static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_nu
typec_partner_set_pd_revision(port->partner, pd_revision);
memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
- sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
if (ret < 0) {
dev_err(typec->dev, "Failed to get SOP discovery data for port: %d\n", port_num);
goto disc_exit;
@@ -870,8 +898,8 @@ static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_n
.clear_events_mask = events_mask,
};
- return cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
- sizeof(req), NULL, 0);
+ return cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
+ sizeof(req), NULL, 0);
}
static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
@@ -882,8 +910,8 @@ static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num
};
int ret;
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
return;
@@ -960,9 +988,9 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
req.swap = USB_PD_CTRL_SWAP_NONE;
- ret = cros_ec_command(typec->ec, typec->pd_ctrl_ver,
- EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, typec->pd_ctrl_ver,
+ EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
+ &resp, sizeof(resp));
if (ret < 0)
return ret;
@@ -997,9 +1025,8 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
/* We're interested in the PD control command version. */
req_v1.cmd = EC_CMD_USB_PD_CONTROL;
- ret = cros_ec_command(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
- &req_v1, sizeof(req_v1), &resp,
- sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
+ &req_v1, sizeof(req_v1), &resp, sizeof(resp));
if (ret < 0)
return ret;
@@ -1090,8 +1117,8 @@ static int cros_typec_probe(struct platform_device *pdev)
typec->typec_cmd_supported = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
typec->needs_mux_ack = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp, sizeof(resp));
if (ret < 0)
return ret;
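The call sites above switch from cros_ec_command() to cros_ec_cmd(). A sketch of a typical call, assuming the mainline helper, which builds and transfers the message and returns the number of response bytes or a negative error:

    struct ec_response_usb_pd_ports resp;
    int ret;

    ret = cros_ec_cmd(typec->ec, 0 /* version */, EC_CMD_USB_PD_PORTS,
                      NULL, 0,              /* no request payload */
                      &resp, sizeof(resp)); /* response buffer */
    if (ret < 0)
            return ret;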
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index aa409f0201fb..793fd3f1015d 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -4,24 +4,60 @@
// Copyright (C) 2012 Google, Inc.
#include <linux/acpi.h>
-#include <linux/leds.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
+struct keyboard_led {
+ struct led_classdev cdev;
+ struct cros_ec_device *ec;
+};
+
+/**
+ * struct keyboard_led_drvdata - keyboard LED driver data.
+ * @init: Init function.
+ * @brightness_get: Get LED brightness level.
+ * @brightness_set: Set LED brightness level. Must not sleep.
+ * @brightness_set_blocking: Set LED brightness level. It can block the
+ * caller for the time required for accessing a
+ * LED device register.
+ * @max_brightness: Maximum brightness.
+ *
+ * See struct led_classdev in include/linux/leds.h for more details.
+ */
+struct keyboard_led_drvdata {
+ int (*init)(struct platform_device *pdev);
+
+ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
+
+ void (*brightness_set)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+ int (*brightness_set_blocking)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+
+ enum led_brightness max_brightness;
+};
+
+#define KEYBOARD_BACKLIGHT_MAX 100
+
+#ifdef CONFIG_ACPI
+
/* Keyboard LED ACPI Device must be defined in firmware */
#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
-#define ACPI_KEYBOARD_BACKLIGHT_MAX 100
-
-static void keyboard_led_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
+static void keyboard_led_set_brightness_acpi(struct led_classdev *cdev,
+ enum led_brightness brightness)
{
union acpi_object param;
struct acpi_object_list input;
@@ -40,7 +76,7 @@ static void keyboard_led_set_brightness(struct led_classdev *cdev,
}
static enum led_brightness
-keyboard_led_get_brightness(struct led_classdev *cdev)
+keyboard_led_get_brightness_acpi(struct led_classdev *cdev)
{
unsigned long long brightness;
acpi_status status;
@@ -56,12 +92,10 @@ keyboard_led_get_brightness(struct led_classdev *cdev)
return brightness;
}
-static int keyboard_led_probe(struct platform_device *pdev)
+static int keyboard_led_init_acpi(struct platform_device *pdev)
{
- struct led_classdev *cdev;
acpi_handle handle;
acpi_status status;
- int error;
/* Look for the keyboard LED ACPI Device */
status = acpi_get_handle(ACPI_ROOT_OBJECT,
@@ -73,33 +107,151 @@ static int keyboard_led_probe(struct platform_device *pdev)
return -ENXIO;
}
- cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
+ return 0;
+}
+
+static const struct keyboard_led_drvdata keyboard_led_drvdata_acpi = {
+ .init = keyboard_led_init_acpi,
+ .brightness_set = keyboard_led_set_brightness_acpi,
+ .brightness_get = keyboard_led_get_brightness_acpi,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_CROS_EC)
+
+static int
+keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_pwm_set_keyboard_backlight params;
+ } __packed buf;
+ struct ec_params_pwm_set_keyboard_backlight *params = &buf.params;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT;
+ msg->outsize = sizeof(*params);
+
+ params->percent = brightness;
+
+ return cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_pwm_get_keyboard_backlight resp;
+ } __packed buf;
+ struct ec_response_pwm_get_keyboard_backlight *resp = &buf.resp;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+ int ret;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT;
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+ if (ret < 0)
+ return ret;
+
+ return resp->percent;
+}
+
+static int keyboard_led_init_ec_pwm(struct platform_device *pdev)
+{
+ struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
+
+ keyboard_led->ec = dev_get_drvdata(pdev->dev.parent);
+ if (!keyboard_led->ec) {
+ dev_err(&pdev->dev, "no parent EC device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {
+ .init = keyboard_led_init_ec_pwm,
+ .brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
+ .brightness_get = keyboard_led_get_brightness_ec_pwm,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#else /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {};
+
+#endif /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static int keyboard_led_probe(struct platform_device *pdev)
+{
+ const struct keyboard_led_drvdata *drvdata;
+ struct keyboard_led *keyboard_led;
+ int error;
+
+ drvdata = device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ keyboard_led = devm_kzalloc(&pdev->dev, sizeof(*keyboard_led), GFP_KERNEL);
+ if (!keyboard_led)
return -ENOMEM;
+ platform_set_drvdata(pdev, keyboard_led);
+
+ if (drvdata->init) {
+ error = drvdata->init(pdev);
+ if (error)
+ return error;
+ }
- cdev->name = "chromeos::kbd_backlight";
- cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX;
- cdev->flags |= LED_CORE_SUSPENDRESUME;
- cdev->brightness_set = keyboard_led_set_brightness;
- cdev->brightness_get = keyboard_led_get_brightness;
+ keyboard_led->cdev.name = "chromeos::kbd_backlight";
+ keyboard_led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ keyboard_led->cdev.max_brightness = drvdata->max_brightness;
+ keyboard_led->cdev.brightness_set = drvdata->brightness_set;
+ keyboard_led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
+ keyboard_led->cdev.brightness_get = drvdata->brightness_get;
- error = devm_led_classdev_register(&pdev->dev, cdev);
+ error = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
if (error)
return error;
return 0;
}
-static const struct acpi_device_id keyboard_led_id[] = {
- { "GOOG0002", 0 },
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id keyboard_led_acpi_match[] = {
+ { "GOOG0002", (kernel_ulong_t)&keyboard_led_drvdata_acpi },
{ }
};
-MODULE_DEVICE_TABLE(acpi, keyboard_led_id);
+MODULE_DEVICE_TABLE(acpi, keyboard_led_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id keyboard_led_of_match[] = {
+ {
+ .compatible = "google,cros-kbd-led-backlight",
+ .data = &keyboard_led_drvdata_ec_pwm,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
+#endif
static struct platform_driver keyboard_led_driver = {
.driver = {
.name = "chromeos-keyboard-leds",
- .acpi_match_table = ACPI_PTR(keyboard_led_id),
+ .acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
+ .of_match_table = of_match_ptr(keyboard_led_of_match),
},
.probe = keyboard_led_probe,
};
diff --git a/drivers/platform/chrome/cros_kunit_util.c b/drivers/platform/chrome/cros_kunit_util.c
new file mode 100644
index 000000000000..f0fda96b11bd
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CrOS Kunit tests utilities.
+ */
+
+#include <kunit/test.h>
+
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+int cros_kunit_ec_xfer_mock_default_result;
+int cros_kunit_ec_xfer_mock_default_ret;
+int cros_kunit_ec_cmd_xfer_mock_called;
+int cros_kunit_ec_pkt_xfer_mock_called;
+
+static struct list_head cros_kunit_ec_xfer_mock_in;
+static struct list_head cros_kunit_ec_xfer_mock_out;
+
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_in, struct ec_xfer_mock, list);
+ if (!mock) {
+ msg->result = cros_kunit_ec_xfer_mock_default_result;
+ return cros_kunit_ec_xfer_mock_default_ret;
+ }
+
+ list_del(&mock->list);
+
+ memcpy(&mock->msg, msg, sizeof(*msg));
+ if (msg->outsize) {
+ mock->i_data = kunit_kzalloc(mock->test, msg->outsize, GFP_KERNEL);
+ if (mock->i_data)
+ memcpy(mock->i_data, msg->data, msg->outsize);
+ }
+
+ msg->result = mock->result;
+ if (msg->insize)
+ memcpy(msg->data, mock->o_data, min(msg->insize, mock->o_data_len));
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_out);
+
+ return mock->ret;
+}
+
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_cmd_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_pkt_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size)
+{
+ return cros_kunit_ec_xfer_mock_addx(test, size, EC_RES_SUCCESS, size);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ if (!mock)
+ return NULL;
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_in);
+ mock->test = test;
+
+ mock->ret = ret;
+ mock->result = result;
+ mock->o_data = kunit_kzalloc(test, size, GFP_KERNEL);
+ if (!mock->o_data)
+ return NULL;
+ mock->o_data_len = size;
+
+ return mock;
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_out, struct ec_xfer_mock, list);
+ if (mock)
+ list_del(&mock->list);
+
+ return mock;
+}
+
+int cros_kunit_readmem_mock_offset;
+u8 *cros_kunit_readmem_mock_data;
+int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest)
+{
+ cros_kunit_readmem_mock_offset = offset;
+
+ memcpy(dest, cros_kunit_readmem_mock_data, bytes);
+
+ return cros_kunit_readmem_mock_ret;
+}
+
+void cros_kunit_mock_reset(void)
+{
+ cros_kunit_ec_xfer_mock_default_result = 0;
+ cros_kunit_ec_xfer_mock_default_ret = 0;
+ cros_kunit_ec_cmd_xfer_mock_called = 0;
+ cros_kunit_ec_pkt_xfer_mock_called = 0;
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_in);
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_out);
+
+ cros_kunit_readmem_mock_offset = 0;
+ cros_kunit_readmem_mock_data = NULL;
+ cros_kunit_readmem_mock_ret = 0;
+}
+
+MODULE_LICENSE("GPL");
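Usage sketch for the queues above: a test arms expected replies with the add helpers, exercises the code under test, then drains the out-queue to inspect what was actually sent (the command checked here is an arbitrary example):

    struct ec_xfer_mock *mock;

    /* Arrange: next xfer succeeds and returns a 4-byte zeroed response. */
    mock = cros_kunit_ec_xfer_mock_add(test, 4);
    KUNIT_ASSERT_PTR_NE(test, mock, NULL);

    /* Act: run code that ends up in ec_dev->cmd_xfer()/pkt_xfer(). */

    /* Assert: verify the request that reached the mock. */
    mock = cros_kunit_ec_xfer_mock_next();
    KUNIT_EXPECT_PTR_NE(test, mock, NULL);
    KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);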
diff --git a/drivers/platform/chrome/cros_kunit_util.h b/drivers/platform/chrome/cros_kunit_util.h
new file mode 100644
index 000000000000..414002271c9c
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CrOS Kunit tests utilities.
+ */
+
+#ifndef _CROS_KUNIT_UTIL_H_
+#define _CROS_KUNIT_UTIL_H_
+
+#include <linux/platform_data/cros_ec_proto.h>
+
+struct ec_xfer_mock {
+ struct list_head list;
+ struct kunit *test;
+
+ /* input */
+ struct cros_ec_command msg;
+ void *i_data;
+
+ /* output */
+ int ret;
+ int result;
+ void *o_data;
+ u32 o_data_len;
+};
+
+extern int cros_kunit_ec_xfer_mock_default_result;
+extern int cros_kunit_ec_xfer_mock_default_ret;
+extern int cros_kunit_ec_cmd_xfer_mock_called;
+extern int cros_kunit_ec_pkt_xfer_mock_called;
+
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void);
+
+extern int cros_kunit_readmem_mock_offset;
+extern u8 *cros_kunit_readmem_mock_data;
+extern int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest);
+
+void cros_kunit_mock_reset(void);
+
+#endif
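The readmem mock slots into the transport's cmd_readmem hook of the device under test; a brief sketch (buffer contents arbitrary):

    static u8 fake_mem[2] = { 0xaa, 0x55 };

    ec_dev->cmd_readmem = cros_kunit_readmem_mock;
    cros_kunit_readmem_mock_data = fake_mem;
    cros_kunit_readmem_mock_ret = sizeof(fake_mem);
    /* Subsequent readmem calls copy from fake_mem and record the offset. */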
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index 91ce6be91aac..4b5a81c9dc6d 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -71,8 +71,8 @@ static void cros_usbpd_get_event_and_notify(struct device *dev,
}
/* Check for PD host events on EC. */
- ret = cros_ec_command(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
- NULL, 0, &host_event_status, sizeof(host_event_status));
+ ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
+ NULL, 0, &host_event_status, sizeof(host_event_status));
if (ret < 0) {
dev_warn(dev, "Can't get host event status (err: %d)\n", ret);
goto send_notify;
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index 814518509739..32e400590be5 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -343,7 +343,7 @@ static __poll_t event_poll(struct file *filp, poll_table *wait)
*
* Removes the first event from the queue, places it in the passed buffer.
*
- * If there are no events in the the queue, then one of two things happens,
+ * If there are no events in the queue, then one of two things happens,
* depending on if the file was opened in nonblocking mode: If in nonblocking
* mode, then return -EAGAIN to say there's no data. If in blocking mode, then
* block until an event is available.
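From user space, the semantics described in that comment look roughly like the following sketch (the device path and surrounding declarations are assumed, not taken from this patch):

    int fd = open("/dev/wilco_event0", O_RDONLY | O_NONBLOCK);
    ssize_t n = read(fd, buf, sizeof(buf));

    if (n < 0 && errno == EAGAIN) {
            /* Queue empty in nonblocking mode: wait, then retry. */
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            poll(&pfd, 1, -1);
            n = read(fd, buf, sizeof(buf));
    }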
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 38800e86ed8a..1ae3c56b66b0 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -959,6 +959,8 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
goto error;
}
+ vq->num_max = vring->num;
+
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 2c2686d5c2fc..ddc08abf398c 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -31,6 +31,7 @@
* @group: sysfs attribute group;
* @groups: list of sysfs attribute group for hwmon registration;
* @regsize: size of a register value;
+ * @io_lock: user access locking;
*/
struct mlxreg_io_priv_data {
struct platform_device *pdev;
@@ -41,6 +42,7 @@ struct mlxreg_io_priv_data {
struct attribute_group group;
const struct attribute_group *groups[2];
int regsize;
+ struct mutex io_lock; /* Protects user access. */
};
static int
@@ -116,14 +118,19 @@ mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
u32 regval = 0;
int ret;
+ mutex_lock(&priv->io_lock);
+
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true,
priv->regsize, &regval);
if (ret)
goto access_error;
+ mutex_unlock(&priv->io_lock);
+
return sprintf(buf, "%u\n", regval);
access_error:
+ mutex_unlock(&priv->io_lock);
return ret;
}
@@ -145,6 +152,8 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ mutex_lock(&priv->io_lock);
+
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, input_val, false,
priv->regsize, &regval);
if (ret)
@@ -154,9 +163,12 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
if (ret)
goto access_error;
+ mutex_unlock(&priv->io_lock);
+
return len;
access_error:
+ mutex_unlock(&priv->io_lock);
dev_err(&priv->pdev->dev, "Bus access error\n");
return ret;
}
@@ -246,16 +258,27 @@ static int mlxreg_io_probe(struct platform_device *pdev)
return PTR_ERR(priv->hwmon);
}
+ mutex_init(&priv->io_lock);
dev_set_drvdata(&pdev->dev, priv);
return 0;
}
+static int mlxreg_io_remove(struct platform_device *pdev)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&priv->io_lock);
+
+ return 0;
+}
+
static struct platform_driver mlxreg_io_driver = {
.driver = {
.name = "mlxreg-io",
},
.probe = mlxreg_io_probe,
+ .remove = mlxreg_io_remove,
};
module_platform_driver(mlxreg_io_driver);
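The new io_lock serializes the regmap read-modify-write sequences issued by concurrent sysfs readers and writers. Reduced to a skeleton (access_registers() is a hypothetical stand-in for the regmap I/O), both paths follow the same single-unlock-per-exit shape:

    mutex_lock(&priv->io_lock);

    ret = access_registers(priv);   /* hypothetical helper */
    if (ret)
            goto access_error;

    mutex_unlock(&priv->io_lock);
    return len;

    access_error:
            mutex_unlock(&priv->io_lock);
            return ret;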
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index c897a2f15840..55834ccb4ac7 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -716,8 +716,12 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
switch (regval) {
case MLXREG_LC_SN4800_C16:
err = mlxreg_lc_sn4800_c16_config_init(mlxreg_lc, regmap, data);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to config client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
return err;
+ }
break;
default:
return -ENODEV;
@@ -730,8 +734,11 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
mlxreg_lc->mux = platform_device_register_resndata(dev, "i2c-mux-mlxcpld", data->hpdev.nr,
NULL, 0, mlxreg_lc->mux_data,
sizeof(*mlxreg_lc->mux_data));
- if (IS_ERR(mlxreg_lc->mux))
+ if (IS_ERR(mlxreg_lc->mux)) {
+ dev_err(dev, "Failed to create mux infra for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
return PTR_ERR(mlxreg_lc->mux);
+ }
/* Register IO access driver. */
if (mlxreg_lc->io_data) {
@@ -740,6 +747,9 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
platform_device_register_resndata(dev, "mlxreg-io", data->hpdev.nr, NULL, 0,
mlxreg_lc->io_data, sizeof(*mlxreg_lc->io_data));
if (IS_ERR(mlxreg_lc->io_regs)) {
+ dev_err(dev, "Failed to create regio for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->io_regs);
goto fail_register_io;
}
@@ -753,6 +763,9 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
mlxreg_lc->led_data,
sizeof(*mlxreg_lc->led_data));
if (IS_ERR(mlxreg_lc->led)) {
+ dev_err(dev, "Failed to create LED objects for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->led);
goto fail_register_led;
}
@@ -809,7 +822,8 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
data->hpdev.nr);
- return -EFAULT;
+ err = -EFAULT;
+ goto i2c_get_adapter_fail;
}
/* Create device at the top of line card I2C tree.*/
@@ -818,32 +832,40 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (IS_ERR(data->hpdev.client)) {
dev_err(&pdev->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
-
- i2c_put_adapter(data->hpdev.adapter);
- data->hpdev.adapter = NULL;
- return PTR_ERR(data->hpdev.client);
+ err = PTR_ERR(data->hpdev.client);
+ goto i2c_new_device_fail;
}
regmap = devm_regmap_init_i2c(data->hpdev.client,
&mlxreg_lc_regmap_conf);
if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
err = PTR_ERR(regmap);
- goto mlxreg_lc_probe_fail;
+ goto devm_regmap_init_i2c_fail;
}
/* Set default registers. */
for (i = 0; i < mlxreg_lc_regmap_conf.num_reg_defaults; i++) {
err = regmap_write(regmap, mlxreg_lc_regmap_default[i].reg,
mlxreg_lc_regmap_default[i].def);
- if (err)
- goto mlxreg_lc_probe_fail;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set default regmap %d for client %s at bus %d at addr 0x%02x\n",
+ i, data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ goto regmap_write_fail;
+ }
}
/* Sync registers with hardware. */
regcache_mark_dirty(regmap);
err = regcache_sync(regmap);
- if (err)
- goto mlxreg_lc_probe_fail;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ goto regcache_sync_fail;
+ }
par_pdata = data->hpdev.brdinfo->platform_data;
mlxreg_lc->par_regmap = par_pdata->regmap;
@@ -854,12 +876,27 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
/* Configure line card. */
err = mlxreg_lc_config_init(mlxreg_lc, regmap, data);
if (err)
- goto mlxreg_lc_probe_fail;
+ goto mlxreg_lc_config_init_fail;
return err;
-mlxreg_lc_probe_fail:
+mlxreg_lc_config_init_fail:
+regcache_sync_fail:
+regmap_write_fail:
+devm_regmap_init_i2c_fail:
+ if (data->hpdev.client) {
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+ }
+i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+i2c_get_adapter_fail:
+ /* Clear event notification callback and handle. */
+ if (data->notifier) {
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
+ }
return err;
}
@@ -868,11 +905,18 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
- /* Clear event notification callback. */
- if (data->notifier) {
- data->notifier->user_handler = NULL;
- data->notifier->handle = NULL;
- }
+ /*
+ * Probe and remove are invoked by hotplug events raised on line card insertion and
+ * removal. If the probe procedure fails, all data is cleared. However, a hotplug event
+ * will still be raised on line card removal and will trigger the remove procedure. In
+ * that case there is nothing to remove.
+ */
+ if (!data->notifier || !data->notifier->handle)
+ return 0;
+
+ /* Clear event notification callback and handle. */
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
/* Destroy static I2C device feeding by main power. */
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
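The relabeled error path is the usual staged unwind: each label releases only what was acquired before the failure, so a fault after i2c_new_client_device() unregisters the client and puts the adapter, while an earlier fault puts only the adapter. In generic form (step names hypothetical):

    err = acquire_a();              /* e.g. i2c_get_adapter() */
    if (err)
            goto a_fail;

    err = acquire_b();              /* e.g. i2c_new_client_device() */
    if (err)
            goto b_fail;

    return 0;

    b_fail:
            release_a();            /* undo acquire_a() only */
    a_fail:
            return err;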
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 4ff5c3a12991..921520475ff6 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -264,7 +264,7 @@ static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
int i, m;
unsigned char ec_cmd[EC_MAX_CMD_ARGS];
unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
- char cmdbuf[64];
+ char cmdbuf[64] = "";
int ec_cmd_bytes;
mutex_lock(&ec_dbgfs_lock);
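The empty-string initializer zeroes the entire array, which guarantees NUL termination even when fewer than 64 bytes are copied in and the user data carries no terminator of its own. A reduced sketch of the point:

    char cmdbuf[64] = "";   /* every byte starts out as '\0' */

    if (copy_from_user(cmdbuf, buf, min(size, sizeof(cmdbuf) - 1)))
            return -EFAULT;
    /* cmdbuf is now guaranteed NUL-terminated for sscanf()/strsep(). */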
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index eb79fbed8059..b629e82af97c 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -72,18 +72,45 @@ config SURFACE_AGGREGATOR_CDEV
The provided interface is intended for debugging and development only,
and should not be used otherwise.
+config SURFACE_AGGREGATOR_HUB
+ tristate "Surface System Aggregator Module Subsystem Device Hubs"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Device-hub drivers for Surface System Aggregator Module (SSAM) subsystem
+ devices.
+
+ Provides subsystem hub drivers which manage client devices on various
+ SSAM subsystems. In some subsystems, notably the BAS subsystem managing
+ devices contained in the base of the Surface Book 3 and the KIP subsystem
+ managing type-cover devices in the Surface Pro 8 and Surface Pro X,
+ devices can be (hot-)removed. Hub devices and drivers are required to
+ manage these subdevices.
+
+ Devices managed via these hubs are:
+ - Battery/AC devices (Surface Book 3).
+ - HID input devices (7th-generation and later models with detachable
+ input devices).
+
+ Select M (recommended) or Y here if you want support for the devices
+ listed above on the corresponding Surface models. Without this
+ module, the respective devices mentioned above will not be instantiated
+ and thus any functionality provided by them will be missing, even when
+ drivers for these devices are present. This module only provides the
+ respective subsystem hubs. Both drivers and device specification (e.g.
+ via the Surface Aggregator Registry) for these devices still need to be
+ selected via other options.
+
config SURFACE_AGGREGATOR_REGISTRY
tristate "Surface System Aggregator Module Device Registry"
depends on SURFACE_AGGREGATOR
depends on SURFACE_AGGREGATOR_BUS
help
- Device-registry and device-hubs for Surface System Aggregator Module
- (SSAM) devices.
+ Device-registry for Surface System Aggregator Module (SSAM) devices.
Provides a module and driver which act as a device-registry for SSAM
client devices that cannot be detected automatically, e.g. via ACPI.
- Such devices are instead provided via this registry and attached via
- device hubs, also provided in this module.
+ Such devices are instead provided and managed via this registry.
Devices provided via this registry are:
- Platform profile (performance-/cooling-mode) device (5th- and later
@@ -99,6 +126,29 @@ config SURFACE_AGGREGATOR_REGISTRY
the respective client devices. Drivers for these devices still need to
be selected via the other options.
+config SURFACE_AGGREGATOR_TABLET_SWITCH
+ tristate "Surface Aggregator Generic Tablet-Mode Switch Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ depends on INPUT
+ help
+ Provides a tablet-mode switch input device on Microsoft Surface models
+ using the KIP subsystem for detachable keyboards (e.g. keyboard covers)
+ or the POS subsystem for device/screen posture changes.
+
+ The KIP subsystem is used on newer Surface generations to handle
+ detachable input peripherals, specifically the keyboard cover (containing
+ keyboard and touchpad) on the Surface Pro 8 and Surface Pro X. The POS
+ subsystem is used for device posture change notifications on the Surface
+ Laptop Studio. This module provides a driver to let user space know when
+ the device should be considered in tablet mode due to the keyboard cover
+ being detached or folded back (essentially signaling when the keyboard is
+ not available for input). It does so by creating a tablet-mode switch
+ input device, sending the standard SW_TABLET_MODE event on mode change.
+
+ Select M or Y here if you want to provide tablet-mode switch input
+ events on the Surface Pro 8, Surface Pro X, and Surface Laptop Studio.
+
config SURFACE_DTX
tristate "Surface DTX (Detachment System) Driver"
depends on SURFACE_AGGREGATOR
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
index 0fc9cd3e4dd9..53344330939b 100644
--- a/drivers/platform/surface/Makefile
+++ b/drivers/platform/surface/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
obj-$(CONFIG_SURFACE_AGGREGATOR) += aggregator/
obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_HUB) += surface_aggregator_hub.o
obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_TABLET_SWITCH) += surface_aggregator_tabletsw.o
obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
index cab020324256..c114f9dd5fe1 100644
--- a/drivers/platform/surface/aggregator/Kconfig
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
menuconfig SURFACE_AGGREGATOR
tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
index c0d550eda5cd..fdf664a217f9 100644
--- a/drivers/platform/surface/aggregator/Makefile
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
# For include/trace/define_trace.h to include trace.h
CFLAGS_core.o = -I$(src)
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index abbbb5b08b07..de539938896e 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -2,10 +2,11 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/surface_aggregator/controller.h>
@@ -14,6 +15,9 @@
#include "bus.h"
#include "controller.h"
+
+/* -- Device and bus functions. --------------------------------------------- */
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -46,6 +50,7 @@ static void ssam_device_release(struct device *dev)
struct ssam_device *sdev = to_ssam_device(dev);
ssam_controller_put(sdev->ctrl);
+ fwnode_handle_put(sdev->dev.fwnode);
kfree(sdev);
}
@@ -363,6 +368,134 @@ void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
}
EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
+
+/* -- Bus registration. ----------------------------------------------------- */
+
+/**
+ * ssam_bus_register() - Register and set-up the SSAM client device bus.
+ */
+int ssam_bus_register(void)
+{
+ return bus_register(&ssam_bus_type);
+}
+
+/**
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
+ */
+void ssam_bus_unregister(void)
+{
+ return bus_unregister(&ssam_bus_type);
+}
+
+
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+static int ssam_device_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+ u8 d, tc, tid, iid, fn;
+ int n;
+
+ n = sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+ if (n != 5)
+ return -EINVAL;
+
+ uid->domain = d;
+ uid->category = tc;
+ uid->target = tid;
+ uid->instance = iid;
+ uid->function = fn;
+
+ return 0;
+}
+
+static int ssam_get_uid_for_node(struct fwnode_handle *node, struct ssam_device_uid *uid)
+{
+ const char *str = fwnode_get_name(node);
+
+ /*
+ * To simplify definitions of firmware nodes, we set the device name
+ * based on the UID of the device, prefixed with "ssam:".
+ */
+ if (strncmp(str, "ssam:", strlen("ssam:")) != 0)
+ return -ENODEV;
+
+ str += strlen("ssam:");
+ return ssam_device_uid_from_string(str, uid);
+}
+
+static int ssam_add_client_device(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct ssam_device_uid uid;
+ struct ssam_device *sdev;
+ int status;
+
+ status = ssam_get_uid_for_node(node, &uid);
+ if (status)
+ return status;
+
+ sdev = ssam_device_alloc(ctrl, uid);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->dev.parent = parent;
+ sdev->dev.fwnode = fwnode_handle_get(node);
+
+ status = ssam_device_add(sdev);
+ if (status)
+ ssam_device_put(sdev);
+
+ return status;
+}
+
+/**
+ * __ssam_register_clients() - Register client devices defined under the
+ * given firmware node as children of the given device.
+ * @parent: The parent device under which clients should be registered.
+ * @ctrl: The controller with which client should be registered.
+ * @node: The firmware node holding definitions of the devices to be added.
+ *
+ * Register all clients that have been defined as children of the given root
+ * firmware node as children of the given parent device. The respective child
+ * firmware nodes will be associated with the correspondingly created child
+ * devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Note that, generally, the use of either ssam_device_register_clients() or
+ * ssam_register_clients() should be preferred as they directly use the
+ * firmware node and/or controller associated with the given device. This
+ * function is only intended for use when different device specifications (e.g.
+ * ACPI and firmware nodes) need to be combined (as is done in the platform hub
+ * of the device registry).
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct fwnode_handle *child;
+ int status;
+
+ fwnode_for_each_child_node(node, child) {
+ /*
+ * Try to add the device specified in the firmware node. If
+ * this fails with -ENODEV, the node does not specify any SSAM
+ * device, so ignore it and continue with the next one.
+ */
+ status = ssam_add_client_device(parent, ctrl, child);
+ if (status && status != -ENODEV)
+ goto err;
+ }
+
+ return 0;
+err:
+ ssam_remove_clients(parent);
+ return status;
+}
+EXPORT_SYMBOL_GPL(__ssam_register_clients);
+
static int ssam_remove_device(struct device *dev, void *_data)
{
struct ssam_device *sdev = to_ssam_device(dev);
@@ -387,19 +520,3 @@ void ssam_remove_clients(struct device *dev)
device_for_each_child_reverse(dev, NULL, ssam_remove_device);
}
EXPORT_SYMBOL_GPL(ssam_remove_clients);
-
-/**
- * ssam_bus_register() - Register and set-up the SSAM client device bus.
- */
-int ssam_bus_register(void)
-{
- return bus_register(&ssam_bus_type);
-}
-
-/**
- * ssam_bus_unregister() - Unregister the SSAM client device bus.
- */
-void ssam_bus_unregister(void)
-{
- return bus_unregister(&ssam_bus_type);
-}
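As an invented example, a firmware node named "ssam:01:0e:01:00:01" resolves like so:

    struct ssam_device_uid uid;

    /* ssam_get_uid_for_node() strips the "ssam:" prefix, then: */
    ssam_device_uid_from_string("01:0e:01:00:01", &uid);
    /*
     * uid.domain = 0x01, uid.category = 0x0e, uid.target = 0x01,
     * uid.instance = 0x00, uid.function = 0x01
     */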
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
index 6964ee84e79c..5b4dbf21906c 100644
--- a/drivers/platform/surface/aggregator/bus.h
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -2,7 +2,7 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_BUS_H
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index b8c377b3f932..43e765199137 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
@@ -2199,16 +2199,26 @@ static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
}
/**
- * ssam_nf_refcount_disable_free() - Disable event for reference count entry if it is
- * no longer in use and free the corresponding entry.
+ * ssam_nf_refcount_disable_free() - Disable event for reference count entry if
+ * it is no longer in use and free the corresponding entry.
* @ctrl: The controller to disable the event on.
* @entry: The reference count entry for the event to be disabled.
* @flags: The flags used for enabling the event on the EC.
+ * @ec: Flag specifying if the event should actually be disabled on the EC.
*
- * If the reference count equals zero, i.e. the event is no longer requested by
- * any client, the event will be disabled and the corresponding reference count
- * entry freed. The reference count entry must not be used any more after a
- * call to this function.
+ * If ``ec`` equals ``true`` and the reference count equals zero (i.e. the
+ * event is no longer requested by any client), the specified event will be
+ * disabled on the EC via the corresponding request.
+ *
+ * If ``ec`` equals ``false``, no request will be sent to the EC and the event
+ * can be considered in a detached state (i.e. no longer used but still
+ * enabled). Disabling an event via this method may be required for
+ * hot-removable devices, where event disable requests may time out after the
+ * device has been physically removed.
+ *
+ * In both cases, if the reference count equals zero, the corresponding
+ * reference count entry will be freed. The reference count entry must not be
+ * used any more after a call to this function.
*
* Also checks if the flags used for disabling the event match the flags used
* for enabling the event and warns if they do not (regardless of reference
@@ -2223,7 +2233,7 @@ static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
* returns the status of the event-enable EC command.
*/
static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
- struct ssam_nf_refcount_entry *entry, u8 flags)
+ struct ssam_nf_refcount_entry *entry, u8 flags, bool ec)
{
const struct ssam_event_registry reg = entry->key.reg;
const struct ssam_event_id id = entry->key.id;
@@ -2232,8 +2242,9 @@ static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
lockdep_assert_held(&nf->lock);
- ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
- reg.target_category, id.target_category, id.instance, entry->refcount);
+ ssam_dbg(ctrl, "%s event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ ec ? "disabling" : "detaching", reg.target_category, id.target_category,
+ id.instance, entry->refcount);
if (entry->flags != flags) {
ssam_warn(ctrl,
@@ -2242,7 +2253,7 @@ static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
id.instance);
}
- if (entry->refcount == 0) {
+ if (ec && entry->refcount == 0) {
status = ssam_ssh_event_disable(ctrl, reg, id, flags);
kfree(entry);
}
@@ -2322,20 +2333,26 @@ int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notif
EXPORT_SYMBOL_GPL(ssam_notifier_register);
/**
- * ssam_notifier_unregister() - Unregister an event notifier.
- * @ctrl: The controller the notifier has been registered on.
- * @n: The event notifier to unregister.
+ * __ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ * @disable: Whether to disable the corresponding event on the EC.
*
* Unregister an event notifier. Decrement the usage counter of the associated
* SAM event if the notifier is not marked as an observer. If the usage counter
- * reaches zero, the event will be disabled.
+ * reaches zero and ``disable`` equals ``true``, the event will be disabled.
+ *
+ * Useful for hot-removable devices, where communication may fail once the
+ * device has been physically removed. In that case, specifying ``disable`` as
+ * ``false`` avoids communication with the EC.
*
* Return: Returns zero on success, %-ENOENT if the given notifier block has
* not been registered on the controller. If the given notifier block was the
* last one associated with its specific event, returns the status of the
* event-disable EC-command.
*/
-int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
+int __ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n,
+ bool disable)
{
u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
struct ssam_nf_refcount_entry *entry;
@@ -2373,7 +2390,7 @@ int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_not
goto remove;
}
- status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags);
+ status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags, disable);
}
remove:
@@ -2383,7 +2400,7 @@ remove:
return status;
}
-EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
+EXPORT_SYMBOL_GPL(__ssam_notifier_unregister);
/**
* ssam_controller_event_enable() - Enable the specified event.
@@ -2477,7 +2494,7 @@ int ssam_controller_event_disable(struct ssam_controller *ctrl,
return -ENOENT;
}
- status = ssam_nf_refcount_disable_free(ctrl, entry, flags);
+ status = ssam_nf_refcount_disable_free(ctrl, entry, flags, true);
mutex_unlock(&nf->lock);
return status;
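The double-underscore rename keeps the old entry point as the common case; presumably the public header wraps it along these lines (a sketch, not part of this patch):

    static inline int ssam_notifier_unregister(struct ssam_controller *ctrl,
                                               struct ssam_event_notifier *n)
    {
            /* Normal path: also disable the event on the EC. */
            return __ssam_notifier_unregister(ctrl, n, true);
    }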
diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
index a0963c3562ff..f0d987abc51e 100644
--- a/drivers/platform/surface/aggregator/controller.h
+++ b/drivers/platform/surface/aggregator/controller.h
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index a62c5dfe42d6..1a6373dea109 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -7,7 +7,7 @@
* Handles communication via requests as well as enabling, disabling, and
* relaying of events.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
index e562958ffdf0..f3ecad92eefd 100644
--- a/drivers/platform/surface/aggregator/ssh_msgb.h
+++ b/drivers/platform/surface/aggregator/ssh_msgb.h
@@ -2,7 +2,7 @@
/*
* SSH message builder functions.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index 8a4451c1ffe5..6748fe4ac5d5 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
index 2eb329f0b91a..64633522f971 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c
index b77912f8f13b..a6f668694365 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.c
+++ b/drivers/platform/surface/aggregator/ssh_parser.c
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
index 3bd6e180fd16..801d8fa69fb5 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.h
+++ b/drivers/platform/surface/aggregator/ssh_parser.h
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
index 790f7f0eee98..f5565570f16c 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
index 9c3cbae2d4bd..4e387a031351 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
index de64cf169060..2a2c17771d01 100644
--- a/drivers/platform/surface/aggregator/trace.h
+++ b/drivers/platform/surface/aggregator/trace.h
@@ -2,7 +2,7 @@
/*
* Trace points for SSAM/SSH.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#undef TRACE_SYSTEM
@@ -76,7 +76,7 @@ TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
-TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC0);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
@@ -85,6 +85,11 @@ TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SPT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SYS);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC1);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SHB);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_POS);
#define SSAM_PTR_UID_LEN 9
#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
@@ -229,40 +234,45 @@ static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
#define ssam_show_ssh_tc(rqid) \
__print_symbolic(rqid, \
- { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
- { SSAM_SSH_TC_SAM, "SAM" }, \
- { SSAM_SSH_TC_BAT, "BAT" }, \
- { SSAM_SSH_TC_TMP, "TMP" }, \
- { SSAM_SSH_TC_PMC, "PMC" }, \
- { SSAM_SSH_TC_FAN, "FAN" }, \
- { SSAM_SSH_TC_PoM, "PoM" }, \
- { SSAM_SSH_TC_DBG, "DBG" }, \
- { SSAM_SSH_TC_KBD, "KBD" }, \
- { SSAM_SSH_TC_FWU, "FWU" }, \
- { SSAM_SSH_TC_UNI, "UNI" }, \
- { SSAM_SSH_TC_LPC, "LPC" }, \
- { SSAM_SSH_TC_TCL, "TCL" }, \
- { SSAM_SSH_TC_SFL, "SFL" }, \
- { SSAM_SSH_TC_KIP, "KIP" }, \
- { SSAM_SSH_TC_EXT, "EXT" }, \
- { SSAM_SSH_TC_BLD, "BLD" }, \
- { SSAM_SSH_TC_BAS, "BAS" }, \
- { SSAM_SSH_TC_SEN, "SEN" }, \
- { SSAM_SSH_TC_SRQ, "SRQ" }, \
- { SSAM_SSH_TC_MCU, "MCU" }, \
- { SSAM_SSH_TC_HID, "HID" }, \
- { SSAM_SSH_TC_TCH, "TCH" }, \
- { SSAM_SSH_TC_BKL, "BKL" }, \
- { SSAM_SSH_TC_TAM, "TAM" }, \
- { SSAM_SSH_TC_ACC, "ACC" }, \
- { SSAM_SSH_TC_UFI, "UFI" }, \
- { SSAM_SSH_TC_USC, "USC" }, \
- { SSAM_SSH_TC_PEN, "PEN" }, \
- { SSAM_SSH_TC_VID, "VID" }, \
- { SSAM_SSH_TC_AUD, "AUD" }, \
- { SSAM_SSH_TC_SMC, "SMC" }, \
- { SSAM_SSH_TC_KPD, "KPD" }, \
- { SSAM_SSH_TC_REG, "REG" } \
+ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
+ { SSAM_SSH_TC_SAM, "SAM" }, \
+ { SSAM_SSH_TC_BAT, "BAT" }, \
+ { SSAM_SSH_TC_TMP, "TMP" }, \
+ { SSAM_SSH_TC_PMC, "PMC" }, \
+ { SSAM_SSH_TC_FAN, "FAN" }, \
+ { SSAM_SSH_TC_PoM, "PoM" }, \
+ { SSAM_SSH_TC_DBG, "DBG" }, \
+ { SSAM_SSH_TC_KBD, "KBD" }, \
+ { SSAM_SSH_TC_FWU, "FWU" }, \
+ { SSAM_SSH_TC_UNI, "UNI" }, \
+ { SSAM_SSH_TC_LPC, "LPC" }, \
+ { SSAM_SSH_TC_TCL, "TCL" }, \
+ { SSAM_SSH_TC_SFL, "SFL" }, \
+ { SSAM_SSH_TC_KIP, "KIP" }, \
+ { SSAM_SSH_TC_EXT, "EXT" }, \
+ { SSAM_SSH_TC_BLD, "BLD" }, \
+ { SSAM_SSH_TC_BAS, "BAS" }, \
+ { SSAM_SSH_TC_SEN, "SEN" }, \
+ { SSAM_SSH_TC_SRQ, "SRQ" }, \
+ { SSAM_SSH_TC_MCU, "MCU" }, \
+ { SSAM_SSH_TC_HID, "HID" }, \
+ { SSAM_SSH_TC_TCH, "TCH" }, \
+ { SSAM_SSH_TC_BKL, "BKL" }, \
+ { SSAM_SSH_TC_TAM, "TAM" }, \
+ { SSAM_SSH_TC_ACC0, "ACC0" }, \
+ { SSAM_SSH_TC_UFI, "UFI" }, \
+ { SSAM_SSH_TC_USC, "USC" }, \
+ { SSAM_SSH_TC_PEN, "PEN" }, \
+ { SSAM_SSH_TC_VID, "VID" }, \
+ { SSAM_SSH_TC_AUD, "AUD" }, \
+ { SSAM_SSH_TC_SMC, "SMC" }, \
+ { SSAM_SSH_TC_KPD, "KPD" }, \
+ { SSAM_SSH_TC_REG, "REG" }, \
+ { SSAM_SSH_TC_SPT, "SPT" }, \
+ { SSAM_SSH_TC_SYS, "SYS" }, \
+ { SSAM_SSH_TC_ACC1, "ACC1" }, \
+ { SSAM_SSH_TC_SHB, "SHB" }, \
+ { SSAM_SSH_TC_POS, "POS" } \
)
DECLARE_EVENT_CLASS(ssam_frame_class,
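For readers unfamiliar with the tracing helpers: ssam_show_ssh_tc() is meant to be used inside TP_printk(), where __print_symbolic() renders the numeric target category by name in the trace output. An illustrative (not verbatim) fragment, in which the event field tc is a hypothetical name:

	TP_printk("tc=%s (0x%02x)",
		  ssam_show_ssh_tc(__entry->tc),	/* e.g. "POS" for 0x26 */
		  __entry->tc)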
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 7b758f8cc137..44e317970557 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -8,7 +8,7 @@
* notifications sent from ACPI via the SAN interface by providing them to any
* registered external driver.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
@@ -37,6 +37,7 @@ struct san_data {
#define to_san_data(ptr, member) \
container_of(ptr, struct san_data, member)
+static struct workqueue_struct *san_wq;
/* -- dGPU notifier interface. ---------------------------------------------- */
@@ -356,7 +357,7 @@ static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
- schedule_delayed_work(&work->work, delay);
+ queue_delayed_work(san_wq, &work->work, delay);
return SSAM_NOTIF_HANDLED;
}
@@ -861,7 +862,7 @@ static int san_remove(struct platform_device *pdev)
* We have unregistered our event sources. Now we need to ensure that
* all delayed works they may have spawned are run to completion.
*/
- flush_scheduled_work();
+ flush_workqueue(san_wq);
return 0;
}
@@ -881,7 +882,27 @@ static struct platform_driver surface_acpi_notify = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
-module_platform_driver(surface_acpi_notify);
+
+static int __init san_init(void)
+{
+ int ret;
+
+ san_wq = alloc_workqueue("san_wq", 0, 0);
+ if (!san_wq)
+ return -ENOMEM;
+ ret = platform_driver_register(&surface_acpi_notify);
+ if (ret)
+ destroy_workqueue(san_wq);
+ return ret;
+}
+module_init(san_init);
+
+static void __exit san_exit(void)
+{
+ platform_driver_unregister(&surface_acpi_notify);
+ destroy_workqueue(san_wq);
+}
+module_exit(san_exit);
MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
index 30fb50fde450..492c82e69182 100644
--- a/drivers/platform/surface/surface_aggregator_cdev.c
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -3,7 +3,7 @@
* Provides user-space access to the SSAM EC via the /dev/surface/aggregator
* misc device. Intended for debugging and development.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/fs.h>
diff --git a/drivers/platform/surface/surface_aggregator_hub.c b/drivers/platform/surface/surface_aggregator_hub.c
new file mode 100644
index 000000000000..43061514be38
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_hub.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Surface System Aggregator Module (SSAM) subsystem device hubs.
+ *
+ * Provides a driver for SSAM subsystem device hubs. This driver performs
+ * instantiation of the devices managed by said hubs and takes care of
+ * (hot-)removal.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic subsystem hub driver framework. -------------------------- */
+
+enum ssam_hub_state {
+ SSAM_HUB_UNINITIALIZED, /* Only set during initialization. */
+ SSAM_HUB_CONNECTED,
+ SSAM_HUB_DISCONNECTED,
+};
+
+enum ssam_hub_flags {
+ SSAM_HUB_HOT_REMOVED,
+};
+
+struct ssam_hub;
+
+struct ssam_hub_ops {
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+};
+
+struct ssam_hub {
+ struct ssam_device *sdev;
+
+ enum ssam_hub_state state;
+ unsigned long flags;
+
+ struct delayed_work update_work;
+ unsigned long connect_delay;
+
+ struct ssam_event_notifier notif;
+ struct ssam_hub_ops ops;
+};
+
+struct ssam_hub_desc {
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ } event;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+ } ops;
+
+ unsigned long connect_delay_ms;
+};
+
+static void ssam_hub_update_workfn(struct work_struct *work)
+{
+ struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work);
+ enum ssam_hub_state state;
+ int status = 0;
+
+ status = hub->ops.get_state(hub, &state);
+ if (status)
+ return;
+
+ /*
+ * There is a small possibility that hub devices were hot-removed and
+ * re-added before we were able to remove them here. In that case, both
+ * the state returned by get_state() and the state of the hub will
+ * equal SSAM_HUB_CONNECTED and we would bail early below, which would
+ * leave child devices without proper (re-)initialization and the
+ * hot-remove flag set.
+ *
+ * Therefore, we check whether devices have been hot-removed via an
+ * additional flag on the hub and, in this case, override the returned
+ * hub state. In case of a missed disconnect (i.e. get_state returned
+ * "connected"), we further need to re-schedule this work (with the
+ * appropriate delay) as the actual connect work submission might have
+ * been merged with this one.
+ *
+ * This then leads to one of two cases: Either we submit an unnecessary
+ * work item (which will get ignored via either the queue or the state
+ * checks) or, in the unlikely case that the work is actually required,
+ * double the normal connect delay.
+ */
+ if (test_and_clear_bit(SSAM_HUB_HOT_REMOVED, &hub->flags)) {
+ if (state == SSAM_HUB_CONNECTED)
+ schedule_delayed_work(&hub->update_work, hub->connect_delay);
+
+ state = SSAM_HUB_DISCONNECTED;
+ }
+
+ if (hub->state == state)
+ return;
+ hub->state = state;
+
+ if (hub->state == SSAM_HUB_CONNECTED)
+ status = ssam_device_register_clients(hub->sdev);
+ else
+ ssam_remove_clients(&hub->sdev->dev);
+
+ if (status)
+ dev_err(&hub->sdev->dev, "failed to update hub child devices: %d\n", status);
+}
+
+static int ssam_hub_mark_hot_removed(struct device *dev, void *_data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (is_ssam_device(dev))
+ ssam_device_mark_hot_removed(sdev);
+
+ return 0;
+}
+
+static void ssam_hub_update(struct ssam_hub *hub, bool connected)
+{
+ unsigned long delay;
+
+ /* Mark devices as hot-removed before we remove any. */
+ if (!connected) {
+ set_bit(SSAM_HUB_HOT_REMOVED, &hub->flags);
+ device_for_each_child_reverse(&hub->sdev->dev, NULL, ssam_hub_mark_hot_removed);
+ }
+
+ /*
+ * Delay update when the base/keyboard cover is being connected to give
+ * devices/EC some time to set up.
+ */
+ delay = connected ? hub->connect_delay : 0;
+
+ schedule_delayed_work(&hub->update_work, delay);
+}
+
+static int __maybe_unused ssam_hub_resume(struct device *dev)
+{
+ struct ssam_hub *hub = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_hub_pm_ops, NULL, ssam_hub_resume);
+
+static int ssam_hub_probe(struct ssam_device *sdev)
+{
+ const struct ssam_hub_desc *desc;
+ struct ssam_hub *hub;
+ int status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->sdev = sdev;
+ hub->state = SSAM_HUB_UNINITIALIZED;
+
+ hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
+ hub->notif.base.fn = desc->ops.notify;
+ hub->notif.event.reg = desc->event.reg;
+ hub->notif.event.id = desc->event.id;
+ hub->notif.event.mask = desc->event.mask;
+ hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ hub->connect_delay = msecs_to_jiffies(desc->connect_delay_ms);
+ hub->ops.get_state = desc->ops.get_state;
+
+ INIT_DELAYED_WORK(&hub->update_work, ssam_hub_update_workfn);
+
+ ssam_device_set_drvdata(sdev, hub);
+
+ status = ssam_device_notifier_register(sdev, &hub->notif);
+ if (status)
+ return status;
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+
+static void ssam_hub_remove(struct ssam_device *sdev)
+{
+ struct ssam_hub *hub = ssam_device_get_drvdata(sdev);
+
+ ssam_device_notifier_unregister(sdev, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_remove_clients(&sdev->dev);
+}
+
+
+/* -- SSAM base-subsystem hub driver. --------------------------------------- */
+
+/*
+ * Some devices (especially battery) may need a bit of time to be fully usable
+ * after being (re-)connected. This delay has been determined via
+ * experimentation.
+ */
+#define SSAM_BASE_UPDATE_CONNECT_DELAY 2500
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+#define SSAM_BAS_OPMODE_TABLET 0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
+
+static int ssam_base_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ u8 opmode;
+ int status;
+
+ status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
+ return status;
+ }
+
+ if (opmode != SSAM_BAS_OPMODE_TABLET)
+ *state = SSAM_HUB_CONNECTED;
+ else
+ *state = SSAM_HUB_DISCONNECTED;
+
+ return 0;
+}
+
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+ return 0;
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+
+ /*
+ * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+ * consumed by the detachment system driver. We're just a (more or less)
+ * silent observer.
+ */
+ return 0;
+}
+
+static const struct ssam_hub_desc base_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_BAS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_NONE,
+ },
+ .ops = {
+ .notify = ssam_base_hub_notif,
+ .get_state = ssam_base_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_BASE_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- SSAM KIP-subsystem hub driver. ---------------------------------------- */
+
+/*
+ * Some devices may need a bit of time to be fully usable after being
+ * (re-)connected. This delay has been determined via experimentation.
+ */
+#define SSAM_KIP_UPDATE_CONNECT_DELAY 250
+
+#define SSAM_EVENT_KIP_CID_CONNECTION 0x2c
+
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x2c,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ int status;
+ u8 connected;
+
+ status = ssam_retry(__ssam_kip_query_state, hub->sdev->ctrl, &connected);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query KIP connection state: %d\n", status);
+ return status;
+ }
+
+ *state = connected ? SSAM_HUB_CONNECTED : SSAM_HUB_DISCONNECTED;
+ return 0;
+}
+
+static u32 ssam_kip_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_CONNECTION)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_hub_desc kip_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+ .ops = {
+ .notify = ssam_kip_hub_notif,
+ .get_state = ssam_kip_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_KIP_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_hub_match[] = {
+ { SSAM_VDEV(HUB, 0x01, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub },
+ { SSAM_VDEV(HUB, 0x02, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub },
+ { }
+};
+MODULE_DEVICE_TABLE(ssam, ssam_hub_match);
+
+static struct ssam_device_driver ssam_subsystem_hub_driver = {
+ .probe = ssam_hub_probe,
+ .remove = ssam_hub_remove,
+ .match_table = ssam_hub_match,
+ .driver = {
+ .name = "surface_aggregator_subsystem_hub",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_hub_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_subsystem_hub_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Subsystem device hub driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index ce2bd88feeaa..d5655f6a4a41 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -6,19 +6,16 @@
* cannot be auto-detected. Provides device-hubs and performs instantiation
* for these devices.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
-#include <linux/limits.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
@@ -41,9 +38,15 @@ static const struct software_node ssam_node_root = {
.name = "ssam_platform_hub",
};
+/* KIP device hub (connects keyboard cover devices on Surface Pro 8). */
+static const struct software_node ssam_node_hub_kip = {
+ .name = "ssam:00:00:01:0e:00",
+ .parent = &ssam_node_root,
+};
+
/* Base device hub (devices attached to Surface Book 3 base). */
static const struct software_node ssam_node_hub_base = {
- .name = "ssam:00:00:02:00:00",
+ .name = "ssam:00:00:02:11:00",
.parent = &ssam_node_root,
};
@@ -71,6 +74,12 @@ static const struct software_node ssam_node_tmp_pprof = {
.parent = &ssam_node_root,
};
+/* Tablet-mode switch via KIP subsystem. */
+static const struct software_node ssam_node_kip_tablet_switch = {
+ .name = "ssam:01:0e:01:00:01",
+ .parent = &ssam_node_root,
+};
+
/* DTX / detachment-system device (Surface Book 3). */
static const struct software_node ssam_node_bas_dtx = {
.name = "ssam:01:11:01:00:00",
@@ -155,6 +164,36 @@ static const struct software_node ssam_node_hid_base_iid6 = {
.parent = &ssam_node_hub_base,
};
+/* HID keyboard (KIP hub). */
+static const struct software_node ssam_node_hid_kip_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID pen stash (KIP hub; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_kip_penstash = {
+ .name = "ssam:01:15:02:02:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID touchpad (KIP hub). */
+static const struct software_node ssam_node_hid_kip_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID device instance 5 (KIP hub, unknown HID device). */
+static const struct software_node ssam_node_hid_kip_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* Tablet-mode switch via POS subsystem. */
+static const struct software_node ssam_node_pos_tablet_switch = {
+ .name = "ssam:01:26:01:00:01",
+ .parent = &ssam_node_root,
+};
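+
+/*
+ * These node names encode the SSAM device UID as
+ * ssam:<domain>:<category>:<target>:<instance>:<function>, all hex.
+ * "ssam:01:26:01:00:01" above is thus domain 0x01 (serial hub), target
+ * category 0x26 (POS), target ID 0x01, instance 0x00, function 0x01.
+ */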
+
/*
* Devices for 5th- and 6th-generations models:
* - Surface Book 2,
@@ -201,6 +240,7 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
+ &ssam_node_pos_tablet_switch,
&ssam_node_hid_tid1_keyboard,
&ssam_node_hid_tid1_penstash,
&ssam_node_hid_tid1_touchpad,
@@ -230,289 +270,18 @@ static const struct software_node *ssam_node_group_sp7[] = {
static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_root,
+ &ssam_node_hub_kip,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
- /* TODO: Add support for keyboard cover. */
- NULL,
-};
-
-
-/* -- Device registry helper functions. ------------------------------------- */
-
-static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
-{
- u8 d, tc, tid, iid, fn;
- int n;
-
- n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
- if (n != 5)
- return -EINVAL;
-
- uid->domain = d;
- uid->category = tc;
- uid->target = tid;
- uid->instance = iid;
- uid->function = fn;
-
- return 0;
-}
-
-static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
- struct fwnode_handle *node)
-{
- struct ssam_device_uid uid;
- struct ssam_device *sdev;
- int status;
-
- status = ssam_uid_from_string(fwnode_get_name(node), &uid);
- if (status)
- return status;
-
- sdev = ssam_device_alloc(ctrl, uid);
- if (!sdev)
- return -ENOMEM;
-
- sdev->dev.parent = parent;
- sdev->dev.fwnode = node;
-
- status = ssam_device_add(sdev);
- if (status)
- ssam_device_put(sdev);
-
- return status;
-}
-
-static int ssam_hub_register_clients(struct device *parent, struct ssam_controller *ctrl,
- struct fwnode_handle *node)
-{
- struct fwnode_handle *child;
- int status;
-
- fwnode_for_each_child_node(node, child) {
- /*
- * Try to add the device specified in the firmware node. If
- * this fails with -EINVAL, the node does not specify any SSAM
- * device, so ignore it and continue with the next one.
- */
-
- status = ssam_hub_add_device(parent, ctrl, child);
- if (status && status != -EINVAL)
- goto err;
- }
-
- return 0;
-err:
- ssam_remove_clients(parent);
- return status;
-}
-
-
-/* -- SSAM base-hub driver. ------------------------------------------------- */
-
-/*
- * Some devices (especially battery) may need a bit of time to be fully usable
- * after being (re-)connected. This delay has been determined via
- * experimentation.
- */
-#define SSAM_BASE_UPDATE_CONNECT_DELAY msecs_to_jiffies(2500)
-
-enum ssam_base_hub_state {
- SSAM_BASE_HUB_UNINITIALIZED,
- SSAM_BASE_HUB_CONNECTED,
- SSAM_BASE_HUB_DISCONNECTED,
-};
-
-struct ssam_base_hub {
- struct ssam_device *sdev;
-
- enum ssam_base_hub_state state;
- struct delayed_work update_work;
-
- struct ssam_event_notifier notif;
-};
-
-SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
- .target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
- .command_id = 0x0d,
- .instance_id = 0x00,
-});
-
-#define SSAM_BAS_OPMODE_TABLET 0x00
-#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
-
-static int ssam_base_hub_query_state(struct ssam_base_hub *hub, enum ssam_base_hub_state *state)
-{
- u8 opmode;
- int status;
-
- status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
- if (status < 0) {
- dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
- return status;
- }
-
- if (opmode != SSAM_BAS_OPMODE_TABLET)
- *state = SSAM_BASE_HUB_CONNECTED;
- else
- *state = SSAM_BASE_HUB_DISCONNECTED;
-
- return 0;
-}
-
-static ssize_t ssam_base_hub_state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ssam_base_hub *hub = dev_get_drvdata(dev);
- bool connected = hub->state == SSAM_BASE_HUB_CONNECTED;
-
- return sysfs_emit(buf, "%d\n", connected);
-}
-
-static struct device_attribute ssam_base_hub_attr_state =
- __ATTR(state, 0444, ssam_base_hub_state_show, NULL);
-
-static struct attribute *ssam_base_hub_attrs[] = {
- &ssam_base_hub_attr_state.attr,
+ &ssam_node_kip_tablet_switch,
+ &ssam_node_hid_kip_keyboard,
+ &ssam_node_hid_kip_penstash,
+ &ssam_node_hid_kip_touchpad,
+ &ssam_node_hid_kip_iid5,
NULL,
};
-static const struct attribute_group ssam_base_hub_group = {
- .attrs = ssam_base_hub_attrs,
-};
-
-static void ssam_base_hub_update_workfn(struct work_struct *work)
-{
- struct ssam_base_hub *hub = container_of(work, struct ssam_base_hub, update_work.work);
- struct fwnode_handle *node = dev_fwnode(&hub->sdev->dev);
- enum ssam_base_hub_state state;
- int status = 0;
-
- status = ssam_base_hub_query_state(hub, &state);
- if (status)
- return;
-
- if (hub->state == state)
- return;
- hub->state = state;
-
- if (hub->state == SSAM_BASE_HUB_CONNECTED)
- status = ssam_hub_register_clients(&hub->sdev->dev, hub->sdev->ctrl, node);
- else
- ssam_remove_clients(&hub->sdev->dev);
-
- if (status)
- dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
-}
-
-static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
-{
- struct ssam_base_hub *hub = container_of(nf, struct ssam_base_hub, notif);
- unsigned long delay;
-
- if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
- return 0;
-
- if (event->length < 1) {
- dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
- return 0;
- }
-
- /*
- * Delay update when the base is being connected to give devices/EC
- * some time to set up.
- */
- delay = event->data[0] ? SSAM_BASE_UPDATE_CONNECT_DELAY : 0;
-
- schedule_delayed_work(&hub->update_work, delay);
-
- /*
- * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
- * consumed by the detachment system driver. We're just a (more or less)
- * silent observer.
- */
- return 0;
-}
-
-static int __maybe_unused ssam_base_hub_resume(struct device *dev)
-{
- struct ssam_base_hub *hub = dev_get_drvdata(dev);
-
- schedule_delayed_work(&hub->update_work, 0);
- return 0;
-}
-static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
-
-static int ssam_base_hub_probe(struct ssam_device *sdev)
-{
- struct ssam_base_hub *hub;
- int status;
-
- hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
- if (!hub)
- return -ENOMEM;
-
- hub->sdev = sdev;
- hub->state = SSAM_BASE_HUB_UNINITIALIZED;
-
- hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
- hub->notif.base.fn = ssam_base_hub_notif;
- hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
- hub->notif.event.id.target_category = SSAM_SSH_TC_BAS,
- hub->notif.event.id.instance = 0,
- hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
- hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
-
- INIT_DELAYED_WORK(&hub->update_work, ssam_base_hub_update_workfn);
-
- ssam_device_set_drvdata(sdev, hub);
-
- status = ssam_notifier_register(sdev->ctrl, &hub->notif);
- if (status)
- return status;
-
- status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
- if (status)
- goto err;
-
- schedule_delayed_work(&hub->update_work, 0);
- return 0;
-
-err:
- ssam_notifier_unregister(sdev->ctrl, &hub->notif);
- cancel_delayed_work_sync(&hub->update_work);
- ssam_remove_clients(&sdev->dev);
- return status;
-}
-
-static void ssam_base_hub_remove(struct ssam_device *sdev)
-{
- struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
-
- sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
-
- ssam_notifier_unregister(sdev->ctrl, &hub->notif);
- cancel_delayed_work_sync(&hub->update_work);
- ssam_remove_clients(&sdev->dev);
-}
-
-static const struct ssam_device_id ssam_base_hub_match[] = {
- { SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) },
- { },
-};
-
-static struct ssam_device_driver ssam_base_hub_driver = {
- .probe = ssam_base_hub_probe,
- .remove = ssam_base_hub_remove,
- .match_table = ssam_base_hub_match,
- .driver = {
- .name = "surface_aggregator_base_hub",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &ssam_base_hub_pm_ops,
- },
-};
-
/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
@@ -597,7 +366,7 @@ static int ssam_platform_hub_probe(struct platform_device *pdev)
set_secondary_fwnode(&pdev->dev, root);
- status = ssam_hub_register_clients(&pdev->dev, ctrl, root);
+ status = __ssam_register_clients(&pdev->dev, ctrl, root);
if (status) {
set_secondary_fwnode(&pdev->dev, NULL);
software_node_unregister_node_group(nodes);
@@ -626,32 +395,7 @@ static struct platform_driver ssam_platform_hub_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
-
-
-/* -- Module initialization. ------------------------------------------------ */
-
-static int __init ssam_device_hub_init(void)
-{
- int status;
-
- status = platform_driver_register(&ssam_platform_hub_driver);
- if (status)
- return status;
-
- status = ssam_device_driver_register(&ssam_base_hub_driver);
- if (status)
- platform_driver_unregister(&ssam_platform_hub_driver);
-
- return status;
-}
-module_init(ssam_device_hub_init);
-
-static void __exit ssam_device_hub_exit(void)
-{
- ssam_device_driver_unregister(&ssam_base_hub_driver);
- platform_driver_unregister(&ssam_platform_hub_driver);
-}
-module_exit(ssam_device_hub_exit);
+module_platform_driver(ssam_platform_hub_driver);
MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
new file mode 100644
index 000000000000..27d95a6a7851
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) tablet mode switch driver.
+ *
+ * Copyright (C) 2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic tablet switch driver framework. -------------------------- */
+
+struct ssam_tablet_sw;
+
+struct ssam_tablet_sw_ops {
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+};
+
+struct ssam_tablet_sw {
+ struct ssam_device *sdev;
+
+ u32 state;
+ struct work_struct update_work;
+ struct input_dev *mode_switch;
+
+ struct ssam_tablet_sw_ops ops;
+ struct ssam_event_notifier notif;
+};
+
+struct ssam_tablet_sw_desc {
+ struct {
+ const char *name;
+ const char *phys;
+ } dev;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+ } ops;
+
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ u8 flags;
+ } event;
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+ const char *state = sw->ops.state_name(sw, sw->state);
+
+ return sysfs_emit(buf, "%s\n", state);
+}
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *ssam_tablet_sw_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static const struct attribute_group ssam_tablet_sw_group = {
+ .attrs = ssam_tablet_sw_attrs,
+};
+
+static void ssam_tablet_sw_update_workfn(struct work_struct *work)
+{
+ struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work);
+ int tablet, status;
+ u32 state;
+
+ status = sw->ops.get_state(sw, &state);
+ if (status)
+ return;
+
+ if (sw->state == state)
+ return;
+ sw->state = state;
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = sw->ops.state_is_tablet_mode(sw, state);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(sw->mode_switch);
+}
+
+static int __maybe_unused ssam_tablet_sw_resume(struct device *dev)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+
+ schedule_work(&sw->update_work);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_tablet_sw_pm_ops, NULL, ssam_tablet_sw_resume);
+
+static int ssam_tablet_sw_probe(struct ssam_device *sdev)
+{
+ const struct ssam_tablet_sw_desc *desc;
+ struct ssam_tablet_sw *sw;
+ int tablet, status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ sw = devm_kzalloc(&sdev->dev, sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return -ENOMEM;
+
+ sw->sdev = sdev;
+
+ sw->ops.get_state = desc->ops.get_state;
+ sw->ops.state_name = desc->ops.state_name;
+ sw->ops.state_is_tablet_mode = desc->ops.state_is_tablet_mode;
+
+ INIT_WORK(&sw->update_work, ssam_tablet_sw_update_workfn);
+
+ ssam_device_set_drvdata(sdev, sw);
+
+ /* Get initial state. */
+ status = sw->ops.get_state(sw, &sw->state);
+ if (status)
+ return status;
+
+ /* Set up tablet mode switch. */
+ sw->mode_switch = devm_input_allocate_device(&sdev->dev);
+ if (!sw->mode_switch)
+ return -ENOMEM;
+
+ sw->mode_switch->name = desc->dev.name;
+ sw->mode_switch->phys = desc->dev.phys;
+ sw->mode_switch->id.bustype = BUS_HOST;
+ sw->mode_switch->dev.parent = &sdev->dev;
+
+ tablet = sw->ops.state_is_tablet_mode(sw, sw->state);
+ input_set_capability(sw->mode_switch, EV_SW, SW_TABLET_MODE);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+
+ status = input_register_device(sw->mode_switch);
+ if (status)
+ return status;
+
+ /* Set up notifier. */
+ sw->notif.base.priority = 0;
+ sw->notif.base.fn = desc->ops.notify;
+ sw->notif.event.reg = desc->event.reg;
+ sw->notif.event.id = desc->event.id;
+ sw->notif.event.mask = desc->event.mask;
+ sw->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_device_notifier_register(sdev, &sw->notif);
+ if (status)
+ return status;
+
+ status = sysfs_create_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+ if (status)
+ goto err;
+
+ /* We might have missed events during setup, so check again. */
+ schedule_work(&sw->update_work);
+ return 0;
+
+err:
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+ return status;
+}
+
+static void ssam_tablet_sw_remove(struct ssam_device *sdev)
+{
+ struct ssam_tablet_sw *sw = ssam_device_get_drvdata(sdev);
+
+ sysfs_remove_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+}
+
+
+/* -- SSAM KIP tablet switch implementation. -------------------------------- */
+
+#define SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED 0x1d
+
+enum ssam_kip_cover_state {
+ SSAM_KIP_COVER_STATE_DISCONNECTED = 0x01,
+ SSAM_KIP_COVER_STATE_CLOSED = 0x02,
+ SSAM_KIP_COVER_STATE_LAPTOP = 0x03,
+ SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
+ SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05,
+};
+
+static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ return "disconnected";
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ return "closed";
+
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return "laptop";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ return "folded-canvas";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ return "folded-back";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ return true;
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return false;
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown KIP cover state: %d\n", sw->state);
+ return true;
+ }
+}
+
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x1d,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_get_cover_state(struct ssam_tablet_sw *sw, u32 *state)
+{
+ int status;
+ u8 raw;
+
+ status = ssam_retry(__ssam_kip_get_cover_state, sw->sdev->ctrl, &raw);
+ if (status < 0) {
+ dev_err(&sw->sdev->dev, "failed to query KIP lid state: %d\n", status);
+ return status;
+ }
+
+ *state = raw;
+ return 0;
+}
+
+static u32 ssam_kip_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_kip_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface KIP Tablet Mode Switch",
+ .phys = "ssam/01:0e:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_kip_sw_notif,
+ .get_state = ssam_kip_get_cover_state,
+ .state_name = ssam_kip_cover_state_name,
+ .state_is_tablet_mode = ssam_kip_cover_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- SSAM POS tablet switch implementation. -------------------------------- */
+
+static bool tablet_mode_in_slate_state = true;
+module_param(tablet_mode_in_slate_state, bool, 0644);
+MODULE_PARM_DESC(tablet_mode_in_slate_state, "Enable tablet mode in slate device posture, default is 'true'");
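+
+/*
+ * With permissions 0644 this can also be toggled at runtime, presumably via
+ * /sys/module/surface_aggregator_tabletsw/parameters/tablet_mode_in_slate_state
+ * (the path assumes the module name matches this source file's name).
+ */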
+
+#define SSAM_EVENT_POS_CID_POSTURE_CHANGED 0x03
+#define SSAM_POS_MAX_SOURCES 4
+
+enum ssam_pos_state {
+ SSAM_POS_POSTURE_LID_CLOSED = 0x00,
+ SSAM_POS_POSTURE_LAPTOP = 0x01,
+ SSAM_POS_POSTURE_SLATE = 0x02,
+ SSAM_POS_POSTURE_TABLET = 0x03,
+};
+
+struct ssam_sources_list {
+ __le32 count;
+ __le32 id[SSAM_POS_MAX_SOURCES];
+} __packed;
+
+static const char *ssam_pos_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return "closed";
+
+ case SSAM_POS_POSTURE_LAPTOP:
+ return "laptop";
+
+ case SSAM_POS_POSTURE_SLATE:
+ return "slate";
+
+ case SSAM_POS_POSTURE_TABLET:
+ return "tablet";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_pos_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LAPTOP:
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return false;
+
+ case SSAM_POS_POSTURE_SLATE:
+ return tablet_mode_in_slate_state;
+
+ case SSAM_POS_POSTURE_TABLET:
+ return true;
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return true;
+ }
+}
+
+static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sources_list *sources)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status;
+
+ rqst.target_category = SSAM_SSH_TC_POS;
+ rqst.target_id = 0x01;
+ rqst.command_id = 0x01;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = 0;
+ rqst.payload = NULL;
+
+ rsp.capacity = sizeof(*sources);
+ rsp.length = 0;
+ rsp.pointer = (u8 *)sources;
+
+ status = ssam_retry(ssam_request_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0);
+ if (status)
+ return status;
+
+ /* We need at least the 'sources->count' field. */
+ if (rsp.length < sizeof(__le32)) {
+ dev_err(&sw->sdev->dev, "received source list response is too small\n");
+ return -EPROTO;
+ }
+
+ /* Make sure 'sources->count' matches with the response length. */
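+ /*
+ * E.g. a list reporting exactly one source must be sizeof(__le32)
+ * (the count) + 1 * sizeof(__le32) (the id) = 8 bytes long.
+ */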
+ if (get_unaligned_le32(&sources->count) * sizeof(__le32) + sizeof(__le32) != rsp.length) {
+ dev_err(&sw->sdev->dev, "mismatch between number of sources and response size\n");
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id)
+{
+ struct ssam_sources_list sources = {};
+ int status;
+
+ status = ssam_pos_get_sources_list(sw, &sources);
+ if (status)
+ return status;
+
+ if (get_unaligned_le32(&sources.count) == 0) {
+ dev_err(&sw->sdev->dev, "no posture sources found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently don't know what to do with more than one posture
+ * source. At the moment, only one source seems to be used/provided.
+ * The WARN_ON() here should hopefully let us know quickly once there
+ * is a device that provides multiple sources, at which point we can
+ * then try to figure out how to handle them.
+ */
+ WARN_ON(get_unaligned_le32(&sources.count) > 1);
+
+ *source_id = get_unaligned_le32(&sources.id[0]);
+ return 0;
+}
+
+SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, {
+ .target_category = SSAM_SSH_TC_POS,
+ .target_id = 0x01,
+ .command_id = 0x02,
+ .instance_id = 0x00,
+});
+
+static int ssam_pos_get_posture_for_source(struct ssam_tablet_sw *sw, u32 source_id, u32 *posture)
+{
+ __le32 source_le = cpu_to_le32(source_id);
+ __le32 rspval_le = 0;
+ int status;
+
+ status = ssam_retry(__ssam_pos_get_posture_for_source, sw->sdev->ctrl,
+ &source_le, &rspval_le);
+ if (status)
+ return status;
+
+ *posture = le32_to_cpu(rspval_le);
+ return 0;
+}
+
+static int ssam_pos_get_posture(struct ssam_tablet_sw *sw, u32 *state)
+{
+ u32 source_id;
+ int status;
+
+ status = ssam_pos_get_source(sw, &source_id);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture source ID: %d\n", status);
+ return status;
+ }
+
+ status = ssam_pos_get_posture_for_source(sw, source_id, state);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture value for source %u: %d\n",
+ source_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static u32 ssam_pos_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_POS_CID_POSTURE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length != sizeof(__le32) * 3)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface POS Tablet Mode Switch",
+ .phys = "ssam/01:26:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_pos_sw_notif,
+ .get_state = ssam_pos_get_posture,
+ .state_name = ssam_pos_state_name,
+ .state_is_tablet_mode = ssam_pos_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_POS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_tablet_sw_match[] = {
+ { SSAM_SDEV(KIP, 0x01, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
+ { SSAM_SDEV(POS, 0x01, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match);
+
+static struct ssam_device_driver ssam_tablet_sw_driver = {
+ .probe = ssam_tablet_sw_probe,
+ .remove = ssam_tablet_sw_remove,
+ .match_table = ssam_tablet_sw_match,
+ .driver = {
+ .name = "surface_aggregator_tablet_mode_switch",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_tablet_sw_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_tablet_sw_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Tablet mode switch driver for Surface devices using the Surface Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 1203b9a82993..ed36944467f9 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -8,7 +8,7 @@
* acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
* use), or request detachment via user-space.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/fs.h>
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index ec66fde28e75..c219b840d491 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -4,7 +4,7 @@
* properly configuring the respective GPEs. Required for wakeup via lid on
* newer Intel-based Microsoft Surface devices.
*
- * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -172,6 +172,18 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
.driver_data = (void *)lid_device_props_l4D,
},
{
+ .ident = "Surface Laptop 4 (Intel 13\")",
+ .matches = {
+ /*
+ * We match for SKU here due to different variants: The
+ * AMD (15") version does not rely on GPEs.
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1950:1951"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ {
.ident = "Surface Laptop Studio",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c
index cfcc15cfbacb..f004a2495201 100644
--- a/drivers/platform/surface/surface_hotplug.c
+++ b/drivers/platform/surface/surface_hotplug.c
@@ -10,7 +10,7 @@
* Event signaling is handled via ACPI, which will generate the appropriate
* device-check notifications to be picked up by the PCIe hot-plug driver.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index 6373d3b5eb7f..fbf2e11fd6ce 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -3,7 +3,7 @@
* Surface Platform Profile / Performance Mode driver for Surface System
* Aggregator Module (thermal subsystem).
*
- * Copyright (C) 2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2021-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bc4013e950ed..f2f98e942cf2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -177,17 +177,15 @@ config ACER_WIRELESS
config ACER_WMI
tristate "Acer WMI Laptop Extras"
- depends on ACPI
- select LEDS_CLASS
- select NEW_LEDS
depends on BACKLIGHT_CLASS_DEVICE
depends on SERIO_I8042
depends on INPUT
depends on RFKILL || RFKILL = n
depends on ACPI_WMI
+ select ACPI_VIDEO
select INPUT_SPARSEKMAP
- # Acer WMI depends on ACPI_VIDEO when ACPI is enabled
- select ACPI_VIDEO if ACPI
+ select LEDS_CLASS
+ select NEW_LEDS
help
This is a driver for newer Acer (and Wistron) laptops. It adds
wireless radio and bluetooth control, and on some laptops,
@@ -196,32 +194,7 @@ config ACER_WMI
If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
here.
-config AMD_PMC
- tristate "AMD SoC PMC driver"
- depends on ACPI && PCI && RTC_CLASS
- help
- The driver provides support for AMD Power Management Controller
- primarily responsible for S2Idle transactions that are driven from
- a platform firmware running on SMU. This driver also provides a debug
- mechanism to investigate the S2Idle transactions and failures.
-
- Say Y or M here if you have a notebook powered by AMD RYZEN CPU/APU.
-
- If you choose to compile this driver as a module the module will be
- called amd-pmc.
-
-config AMD_HSMP
- tristate "AMD HSMP Driver"
- depends on AMD_NB && X86_64
- help
- The driver provides a way for user space tools to monitor and manage
- system management functionality on EPYC server CPUs from AMD.
-
- Host System Management Port (HSMP) interface is a mailbox interface
- between the x86 core and the System Management Unit (SMU) firmware.
-
- If you choose to compile this driver as a module the module will be
- called amd_hsmp.
+source "drivers/platform/x86/amd/Kconfig"
config ADV_SWBUTTON
tristate "Advantech ACPI Software Button Driver"
@@ -300,6 +273,8 @@ config ASUS_WMI
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
select ACPI_PLATFORM_PROFILE
help
Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
@@ -1164,7 +1139,14 @@ config WINMATE_FM07_KEYS
endif # X86_PLATFORM_DEVICES
-config PMC_ATOM
- def_bool y
- depends on PCI
- select COMMON_CLK
+config P2SB
+ bool "Primary to Sideband (P2SB) bridge access support"
+ depends on PCI && X86
+ help
+ The Primary to Sideband (P2SB) bridge is an interface to some
+ PCI devices connected through it. In particular, the SPI NOR
+ controller in the Intel Apollo Lake SoC is one such device.
+
+ The main purpose of this library is to unhide the P2SB device on
+ platforms where firmware keeps it hidden, so that the devices
+ behind it can be accessed.
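A rough sketch of how a consumer might use this library, assuming the p2sb_bar() helper it exports (the devfn below is made up):

#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>

static int lookup_hidden_bar(struct pci_bus *bus)
{
	struct resource mem;
	int ret;

	/* Unhide the P2SB device just long enough to read the BAR. */
	ret = p2sb_bar(bus, PCI_DEVFN(13, 2), &mem);	/* devfn is made up */
	if (ret)
		return ret;

	pr_info("hidden device BAR: %pR\n", &mem);
	return 0;
}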
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 4a59f47a46e2..5a428caa654a 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -23,8 +23,7 @@ obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
# AMD
-obj-$(CONFIG_AMD_PMC) += amd-pmc.o
-obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
+obj-y += amd/
# Advantech
obj-$(CONFIG_ADV_SWBUTTON) += adv_swbutton.o
@@ -120,13 +119,17 @@ obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
# Intel uncore drivers
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
+# Intel miscellaneous drivers
+intel_p2sb-y := p2sb.o
+obj-$(CONFIG_P2SB) += intel_p2sb.o
+
# Intel PMIC / PMC / P-Unit devices
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o
obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
obj-$(CONFIG_INTEL_SCU_WDT) += intel_scu_wdt.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
+obj-$(CONFIG_X86_INTEL_LPSS) += pmc_atom.o
# Siemens Simatic Industrial PCs
obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 9c6943e401a6..e0230ea0cb7e 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1615,12 +1615,7 @@ static int read_brightness(struct backlight_device *bd)
static int update_bl_status(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
+ int intensity = backlight_get_brightness(bd);
set_u32(intensity, ACER_CAP_BRIGHTNESS);
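This hunk and the two similar backlight conversions below lean on the generic helpers from include/linux/backlight.h, which fold the power/fb_blank/suspend checks into a single call. Simplified, they behave like:

static inline bool backlight_is_blank(const struct backlight_device *bd)
{
	return bd->props.power != FB_BLANK_UNBLANK ||
	       bd->props.fb_blank != FB_BLANK_UNBLANK ||
	       bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
}

static inline int backlight_get_brightness(const struct backlight_device *bd)
{
	return backlight_is_blank(bd) ? 0 : bd->props.brightness;
}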
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
new file mode 100644
index 000000000000..c0d0a3c5170c
--- /dev/null
+++ b/drivers/platform/x86/amd/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD x86 Platform Specific Drivers
+#
+
+config AMD_PMC
+ tristate "AMD SoC PMC driver"
+ depends on ACPI && PCI && RTC_CLASS
+ help
+ The driver provides support for AMD Power Management Controller
+ primarily responsible for S2Idle transactions that are driven from
+ a platform firmware running on SMU. This driver also provides a debug
+ mechanism to investigate the S2Idle transactions and failures.
+
+ Say Y or M here if you have a notebook powered by AMD RYZEN CPU/APU.
+
+ If you choose to compile this driver as a module the module will be
+ called amd-pmc.
+
+config AMD_HSMP
+ tristate "AMD HSMP Driver"
+ depends on AMD_NB && X86_64
+ help
+ The driver provides a way for user space tools to monitor and manage
+ system management functionality on EPYC server CPUs from AMD.
+
+ Host System Management Port (HSMP) interface is a mailbox interface
+ between the x86 core and the System Management Unit (SMU) firmware.
+
+ If you choose to compile this driver as a module the module will be
+ called amd_hsmp.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
new file mode 100644
index 000000000000..a03fbb08e808
--- /dev/null
+++ b/drivers/platform/x86/amd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/amd
+# AMD x86 Platform-Specific Drivers
+#
+
+amd-pmc-y := pmc.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+amd_hsmp-y := hsmp.o
+obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
diff --git a/drivers/platform/x86/amd_hsmp.c b/drivers/platform/x86/amd/hsmp.c
index a0c54b838c11..a0c54b838c11 100644
--- a/drivers/platform/x86/amd_hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd/pmc.c
index 700eb19e8450..700eb19e8450 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 57553f9b4d1d..ffe98a18440b 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -291,10 +291,7 @@ static int gmux_get_brightness(struct backlight_device *bd)
static int gmux_update_status(struct backlight_device *bd)
{
struct apple_gmux_data *gmux_data = bl_get_data(bd);
- u32 brightness = bd->props.brightness;
-
- if (bd->props.state & BL_CORE_SUSPENDED)
- return 0;
+ u32 brightness = backlight_get_brightness(bd);
gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 62ce198a3463..89b604e04d7f 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -208,6 +208,7 @@ struct asus_wmi {
int kbd_led_wk;
struct led_classdev lightbar_led;
int lightbar_led_wk;
+ struct led_classdev micmute_led;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct wlan_led_work;
@@ -1028,12 +1029,23 @@ static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
}
+static int micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int state = brightness != LED_OFF;
+ int err;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MICMUTE_LED, state, NULL);
+ return err < 0 ? err : 0;
+}
+
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
led_classdev_unregister(&asus->kbd_led);
led_classdev_unregister(&asus->tpd_led);
led_classdev_unregister(&asus->wlan_led);
led_classdev_unregister(&asus->lightbar_led);
+ led_classdev_unregister(&asus->micmute_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
@@ -1105,6 +1117,19 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
&asus->lightbar_led);
}
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
+ asus->micmute_led.name = "asus::micmute";
+ asus->micmute_led.max_brightness = 1;
+ asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ asus->micmute_led.brightness_set_blocking = micmute_led_set;
+ asus->micmute_led.default_trigger = "audio-micmute";
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->micmute_led);
+ if (rv)
+ goto error;
+ }
+
error:
if (rv)
asus_wmi_led_exit(asus);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index ab610376fdad..0942f50bd793 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -324,9 +324,7 @@ static int bl_update_status(struct backlight_device *b)
if (ret)
return ret;
- set_backlight_state((b->props.power == FB_BLANK_UNBLANK)
- && !(b->props.state & BL_CORE_SUSPENDED)
- && !(b->props.state & BL_CORE_FBBLANK));
+ set_backlight_state(!backlight_is_blank(b));
return 0;
}
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index fe224a54f24c..25421e061c47 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -5,7 +5,6 @@
menuconfig X86_PLATFORM_DRIVERS_DELL
bool "Dell X86 Platform Specific Device Drivers"
- depends on X86_PLATFORM_DEVICES
help
Say Y here to get to see options for device drivers for various
Dell x86 platforms, including vendor-specific laptop extension drivers.
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 1c9e3f3ea41c..53d7fd2943b4 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -20,25 +20,16 @@
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
-/*
- * Early implementations of PMT on client platforms have some
- * differences from the server platforms (which use the Out Of Band
- * Management Services Module OOBMSM). This list tracks those
- * platforms as needed to handle those differences. Newer client
- * platforms are expected to be fully compatible with server.
- */
-static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
- { PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */
- { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
- { }
-};
-
bool intel_pmt_is_early_client_hw(struct device *dev)
{
- struct pci_dev *parent = to_pci_dev(dev->parent);
+ struct intel_vsec_device *ivdev = dev_to_ivdev(dev);
- return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
+ /*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module OOBMSM).
+ */
+ return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
}
EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index f73ecfd4a309..5e4009c05ecf 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -23,12 +23,19 @@
#define TELEM_GUID_OFFSET 0x4
#define TELEM_BASE_OFFSET 0x8
#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0))
+#define TELEM_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
/* size is in bytes */
#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10)
/* Used by client hardware to identify a fixed telemetry entry */
#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000
+enum telem_type {
+ TELEM_TYPE_PUNIT = 0,
+ TELEM_TYPE_CRASHLOG,
+ TELEM_TYPE_PUNIT_FIXED,
+};
+
struct pmt_telem_priv {
int num_entries;
struct intel_pmt_entry entry[];
@@ -39,10 +46,15 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
{
u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);
- if (guid != TELEM_CLIENT_FIXED_BLOCK_GUID)
- return false;
+ if (intel_pmt_is_early_client_hw(dev)) {
+ u32 type = TELEM_TYPE(readl(entry->disc_table));
+
+ if ((type == TELEM_TYPE_PUNIT_FIXED) ||
+ (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
+ return true;
+ }
- return intel_pmt_is_early_client_hw(dev);
+ return false;
}
static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
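TELEM_TYPE() pulls bits 7:4 out of the first discovery dword by mask-and-shift. The same extraction written with FIELD_GET() from <linux/bitfield.h>, shown only to make the bit layout explicit:

    #include <linux/bitfield.h>

    #define TELEM_TYPE_MASK  GENMASK(7, 4)

    u32 disc = readl(entry->disc_table);
    enum telem_type type = FIELD_GET(TELEM_TYPE_MASK, disc); /* == TELEM_TYPE(disc) */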
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index e8424e70d81d..fd102678c75f 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -277,29 +277,38 @@ static int isst_if_get_platform_info(void __user *argp)
return 0;
}
+#define ISST_MAX_BUS_NUMBER 2
struct isst_if_cpu_info {
/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
- int bus_info[2];
- struct pci_dev *pci_dev[2];
+ int bus_info[ISST_MAX_BUS_NUMBER];
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
int punit_cpu_id;
int numa_node;
};
+struct isst_if_pkg_info {
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+};
+
static struct isst_if_cpu_info *isst_cpu_info;
+static struct isst_if_pkg_info *isst_pkg_info;
+
#define ISST_MAX_PCI_DOMAINS 8
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
struct pci_dev *matched_pci_dev = NULL;
struct pci_dev *pci_dev = NULL;
- int no_matches = 0;
+ int no_matches = 0, pkg_id;
int i, bus_number;
- if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
return NULL;
+ pkg_id = topology_physical_package_id(cpu);
+
bus_number = isst_cpu_info[cpu].bus_info[bus_no];
if (bus_number < 0)
return NULL;
@@ -324,6 +333,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
}
if (node == isst_cpu_info[cpu].numa_node) {
+ isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
+
pci_dev = _pci_dev;
break;
}
@@ -342,6 +353,10 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
if (!pci_dev && no_matches == 1)
pci_dev = matched_pci_dev;
+ /* Return pci_dev pointer for any matched CPU in the package */
+ if (!pci_dev)
+ pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
+
return pci_dev;
}
@@ -361,8 +376,8 @@ struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
struct pci_dev *pci_dev;
- if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
return NULL;
pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
@@ -417,10 +432,19 @@ static int isst_if_cpu_info_init(void)
if (!isst_cpu_info)
return -ENOMEM;
+ isst_pkg_info = kcalloc(topology_max_packages(),
+ sizeof(*isst_pkg_info),
+ GFP_KERNEL);
+ if (!isst_pkg_info) {
+ kfree(isst_cpu_info);
+ return -ENOMEM;
+ }
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"platform/x86/isst-if:online",
isst_if_cpu_online, NULL);
if (ret < 0) {
+ kfree(isst_pkg_info);
kfree(isst_cpu_info);
return ret;
}
@@ -433,6 +457,7 @@ static int isst_if_cpu_info_init(void)
static void isst_if_cpu_info_exit(void)
{
cpuhp_remove_state(isst_if_online_id);
+ kfree(isst_pkg_info);
kfree(isst_cpu_info);
}
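The new per-package cache is sized by topology_max_packages() and indexed by topology_physical_package_id(), so a PUNIT pci_dev discovered through any CPU can be reused by every other CPU in the same package. The lookup, reduced to a sketch under the same assumptions as the patch:

    int pkg_id = topology_physical_package_id(cpu);

    if (!pci_dev)   /* fall back to whatever a sibling CPU matched */
            pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

Note the teardown order in isst_if_cpu_info_exit(): the hotplug state is removed before the arrays are freed, so no online callback can race with the kfree() calls.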
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index bed436bf181f..bb81b8b1f7e9 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -15,6 +15,7 @@
#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/module.h>
@@ -30,9 +31,13 @@
#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
#define TABLE_OFFSET_SHIFT 3
+#define PMT_XA_START 0
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
static DEFINE_IDA(intel_vsec_ida);
static DEFINE_IDA(intel_vsec_sdsi_ida);
+static DEFINE_XARRAY_ALLOC(auxdev_array);
/**
* struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
@@ -54,12 +59,6 @@ struct intel_vsec_header {
u32 offset;
};
-/* Platform specific data */
-struct intel_vsec_platform_info {
- struct intel_vsec_header **capabilities;
- unsigned long quirks;
-};
-
enum intel_vsec_id {
VSEC_ID_TELEMETRY = 2,
VSEC_ID_WATCHER = 3,
@@ -138,7 +137,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
const char *name)
{
struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
- int ret;
+ int ret, id;
ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
if (ret < 0) {
@@ -165,14 +164,26 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
return ret;
}
- return devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, auxdev);
+ ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux,
+ auxdev);
+ if (ret < 0)
+ return ret;
+
+ /* Record the auxdev so the PCI error handlers can find it */
+ ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
- unsigned long quirks)
+ struct intel_vsec_platform_info *info)
{
struct intel_vsec_device *intel_vsec_dev;
struct resource *res, *tmp;
+ unsigned long quirks = info->quirks;
int i;
if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
@@ -216,7 +227,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
intel_vsec_dev->pcidev = pdev;
intel_vsec_dev->resource = res;
intel_vsec_dev->num_resources = header->num_entries;
- intel_vsec_dev->quirks = quirks;
+ intel_vsec_dev->info = info;
if (header->id == VSEC_ID_SDSI)
intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
@@ -226,14 +237,15 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
}
-static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
- struct intel_vsec_header **header)
+static bool intel_vsec_walk_header(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
+ struct intel_vsec_header **header = info->capabilities;
bool have_devices = false;
int ret;
for ( ; *header; header++) {
- ret = intel_vsec_add_dev(pdev, *header, quirks);
+ ret = intel_vsec_add_dev(pdev, *header, info);
if (ret)
dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
(*header)->id);
@@ -244,7 +256,8 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
return have_devices;
}
-static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
+static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
bool have_devices = false;
int pos = 0;
@@ -283,7 +296,7 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
header.id = PCI_DVSEC_HEADER2_ID(hdr);
- ret = intel_vsec_add_dev(pdev, &header, quirks);
+ ret = intel_vsec_add_dev(pdev, &header, info);
if (ret)
continue;
@@ -293,7 +306,8 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
return have_devices;
}
-static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
+static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
bool have_devices = false;
int pos = 0;
@@ -327,7 +341,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
header.tbir = INTEL_DVSEC_TABLE_BAR(table);
header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
- ret = intel_vsec_add_dev(pdev, &header, quirks);
+ ret = intel_vsec_add_dev(pdev, &header, info);
if (ret)
continue;
@@ -341,25 +355,25 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct intel_vsec_platform_info *info;
bool have_devices = false;
- unsigned long quirks = 0;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
+ pci_save_state(pdev);
info = (struct intel_vsec_platform_info *)id->driver_data;
- if (info)
- quirks = info->quirks;
+ if (!info)
+ return -EINVAL;
- if (intel_vsec_walk_dvsec(pdev, quirks))
+ if (intel_vsec_walk_dvsec(pdev, info))
have_devices = true;
- if (intel_vsec_walk_vsec(pdev, quirks))
+ if (intel_vsec_walk_vsec(pdev, info))
have_devices = true;
if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
- intel_vsec_walk_header(pdev, quirks, info->capabilities))
+ intel_vsec_walk_header(pdev, info))
have_devices = true;
if (!have_devices)
@@ -370,7 +384,8 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
- .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | VSEC_QUIRK_TABLE_SHIFT,
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG |
+ VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
};
/* DG1 info */
@@ -390,26 +405,89 @@ static struct intel_vsec_header *dg1_capabilities[] = {
static const struct intel_vsec_platform_info dg1_info = {
.capabilities = dg1_capabilities,
- .quirks = VSEC_QUIRK_NO_DVSEC,
+ .quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};
#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
- { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, NULL) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
+static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;
+
+ dev_info(&pdev->dev, "PCI error detected, state %d\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ status = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
+ const struct pci_device_id *pci_dev_id;
+ unsigned long index;
+
+ dev_info(&pdev->dev, "Resetting PCI slot\n");
+
+ msleep(2000);
+ if (pci_enable_device(pdev)) {
+ dev_info(&pdev->dev,
+ "Failed to re-enable PCI device after reset.\n");
+ goto out;
+ }
+
+ status = PCI_ERS_RESULT_RECOVERED;
+
+ xa_for_each(&auxdev_array, index, intel_vsec_dev) {
+ /* check if pdev doesn't match */
+ if (pdev != intel_vsec_dev->pcidev)
+ continue;
+ devm_release_action(&pdev->dev, intel_vsec_remove_aux,
+ &intel_vsec_dev->auxdev);
+ }
+ pci_disable_device(pdev);
+ pci_restore_state(pdev);
+ pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
+ intel_vsec_pci_probe(pdev, pci_dev_id);
+
+out:
+ return status;
+}
+
+static void intel_vsec_pci_resume(struct pci_dev *pdev)
+{
+ dev_info(&pdev->dev, "Done resuming PCI device\n");
+}
+
+static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
+ .error_detected = intel_vsec_pci_error_detected,
+ .slot_reset = intel_vsec_pci_slot_reset,
+ .resume = intel_vsec_pci_resume,
+};
+
static struct pci_driver intel_vsec_pci_driver = {
.name = "intel_vsec",
.id_table = intel_vsec_pci_ids,
.probe = intel_vsec_pci_probe,
+ .err_handler = &intel_vsec_pci_err_handlers,
};
module_pci_driver(intel_vsec_pci_driver);
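auxdev_array is what lets slot_reset() find every auxiliary device created on top of the failing PCI device. xa_alloc() stores a pointer at the next free index within the given limit; a minimal sketch with illustrative names:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(tracked);

    static int track(void *entry)
    {
            u32 id; /* xa_alloc() returns the allocated index here */

            return xa_alloc(&tracked, &id, entry, XA_LIMIT(0, INT_MAX),
                            GFP_KERNEL);
    }

On reset the handler walks the array with xa_for_each(), releases the matching devm action to tear the auxiliary devices down, and re-runs probe against the restored config space.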
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
index 4cc36678e8c5..3deeb05cf394 100644
--- a/drivers/platform/x86/intel/vsec.h
+++ b/drivers/platform/x86/intel/vsec.h
@@ -20,6 +20,15 @@ enum intel_vsec_quirks {
/* DVSEC not present (provided in driver data) */
VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/* Platform specific data */
+struct intel_vsec_platform_info {
+ struct intel_vsec_header **capabilities;
+ unsigned long quirks;
};
struct intel_vsec_device {
@@ -27,7 +36,7 @@ struct intel_vsec_device {
struct pci_dev *pcidev;
struct resource *resource;
struct ida *ida;
- unsigned long quirks;
+ struct intel_vsec_platform_info *info;
int num_resources;
};
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 447044fdcb77..5e072a0666f4 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -34,6 +34,7 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET 0x09
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET 0x19
#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
@@ -66,9 +67,15 @@
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
+#define MLXPLAT_CPLD_LPC_REG_GWP_OFFSET 0x4a
+#define MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET 0x4b
+#define MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET 0x4c
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET 0x53
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET 0x54
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET 0x55
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET 0x56
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET 0x57
#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
@@ -143,6 +150,7 @@
#define MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET 0xfa
#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
+#define MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET 0xfd
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -193,6 +201,7 @@
MLXPLAT_CPLD_AGGR_MASK_LC_ACT | \
MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
@@ -204,6 +213,7 @@
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
@@ -588,6 +598,15 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_asic_items_data[] = {
},
};
+static struct mlxreg_core_data mlxplat_mlxcpld_default_asic2_items_data[] = {
+ {
+ .label = "asic2",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
{
.data = mlxplat_mlxcpld_default_psu_items_data,
@@ -1151,6 +1170,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
.inversed = 0,
.health = true,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic2_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic2_items_data),
+ .inversed = 0,
+ .health = true,
+ }
};
static
@@ -1160,7 +1188,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
- .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_pwr_items_data[] = {
@@ -2004,6 +2032,38 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug for NVLink blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
+ {
+ .label = "global_wp_grant",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_nvlink_blade_items[] = {
+ {
+ .data = mlxplat_mlxcpld_global_wp_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_global_wp_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_nvlink_blade_data = {
+ .items = mlxplat_mlxcpld_nvlink_blade_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -2102,6 +2162,25 @@ static struct mlxreg_core_platform_data mlxplat_default_led_wc_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_wc_data),
};
+/* Platform led default data for water cooling Ethernet switch blade */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_led_eth_wc_blade_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_led_eth_wc_blade_data = {
+ .data = mlxplat_mlxcpld_default_led_eth_wc_blade_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_eth_wc_blade_data),
+};
+
/* Platform led MSN21xx system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_led_data[] = {
{
@@ -2857,6 +2936,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "asic_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "asic2_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -2996,6 +3087,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "asic2_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
.label = "fan_dir",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
.bit = GENMASK(7, 0),
@@ -3057,6 +3155,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
@@ -3535,6 +3639,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_modular_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
@@ -3547,6 +3657,209 @@ static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
};
+/* Platform register access for NVLink blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_nvlink_blade_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_comex",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_voltmon_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_reload_bios",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "global_wp_request",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "comm_chnl_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0200,
+ },
+ {
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "global_wp_response",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_nvlink_blade_regs_io_data = {
+ .data = mlxplat_mlxcpld_nvlink_blade_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_regs_io_data),
+};
+
/* Platform FAN default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
{
@@ -3932,8 +4245,12 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
@@ -4023,9 +4340,15 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -4100,6 +4423,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
@@ -4150,9 +4474,15 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -4221,6 +4551,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
@@ -4417,6 +4748,31 @@ static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi
return 1;
}
+static int __init mlxplat_dmi_default_eth_wc_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_eth_wc_blade_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
+
+ return 1;
+}
+
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
@@ -4579,6 +4935,28 @@ static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
return 1;
}
+static int __init mlxplat_dmi_nvlink_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_nvlink_blade_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_regs_io = &mlxplat_nvlink_blade_regs_io_data;
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
+
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
.callback = mlxplat_dmi_default_wc_matched,
@@ -4612,6 +4990,13 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_default_eth_wc_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI139"),
+ },
+ },
+ {
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
@@ -4642,6 +5027,12 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_nvlink_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0015"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -4830,22 +5221,20 @@ static int __init mlxplat_init(void)
nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
if (mlxplat_i2c)
mlxplat_i2c->regmap = priv->regmap;
- priv->pdev_i2c = platform_device_register_resndata(
- &mlxplat_dev->dev, "i2c_mlxcpld",
- nr, mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
- mlxplat_i2c, sizeof(*mlxplat_i2c));
+ priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
+ nr, mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_i2c, sizeof(*mlxplat_i2c));
if (IS_ERR(priv->pdev_i2c)) {
err = PTR_ERR(priv->pdev_i2c);
goto fail_alloc;
}
for (i = 0; i < mlxplat_mux_num; i++) {
- priv->pdev_mux[i] = platform_device_register_resndata(
- &priv->pdev_i2c->dev,
- "i2c-mux-reg", i, NULL,
- 0, &mlxplat_mux_data[i],
- sizeof(mlxplat_mux_data[i]));
+ priv->pdev_mux[i] = platform_device_register_resndata(&priv->pdev_i2c->dev,
+ "i2c-mux-reg", i, NULL, 0,
+ &mlxplat_mux_data[i],
+ sizeof(mlxplat_mux_data[i]));
if (IS_ERR(priv->pdev_mux[i])) {
err = PTR_ERR(priv->pdev_mux[i]);
goto fail_platform_mux_register;
@@ -4853,16 +5242,18 @@ static int __init mlxplat_init(void)
}
/* Add hotplug driver */
- mlxplat_hotplug->regmap = priv->regmap;
- priv->pdev_hotplug = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-hotplug",
- PLATFORM_DEVID_NONE,
- mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
- mlxplat_hotplug, sizeof(*mlxplat_hotplug));
- if (IS_ERR(priv->pdev_hotplug)) {
- err = PTR_ERR(priv->pdev_hotplug);
- goto fail_platform_mux_register;
+ if (mlxplat_hotplug) {
+ mlxplat_hotplug->regmap = priv->regmap;
+ priv->pdev_hotplug =
+ platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-hotplug", PLATFORM_DEVID_NONE,
+ mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_hotplug, sizeof(*mlxplat_hotplug));
+ if (IS_ERR(priv->pdev_hotplug)) {
+ err = PTR_ERR(priv->pdev_hotplug);
+ goto fail_platform_mux_register;
+ }
}
/* Set default registers. */
@@ -4875,24 +5266,26 @@ static int __init mlxplat_init(void)
}
/* Add LED driver. */
- mlxplat_led->regmap = priv->regmap;
- priv->pdev_led = platform_device_register_resndata(
- &mlxplat_dev->dev, "leds-mlxreg",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_led, sizeof(*mlxplat_led));
- if (IS_ERR(priv->pdev_led)) {
- err = PTR_ERR(priv->pdev_led);
- goto fail_platform_hotplug_register;
+ if (mlxplat_led) {
+ mlxplat_led->regmap = priv->regmap;
+ priv->pdev_led =
+ platform_device_register_resndata(&mlxplat_dev->dev, "leds-mlxreg",
+ PLATFORM_DEVID_NONE, NULL, 0, mlxplat_led,
+ sizeof(*mlxplat_led));
+ if (IS_ERR(priv->pdev_led)) {
+ err = PTR_ERR(priv->pdev_led);
+ goto fail_platform_hotplug_register;
+ }
}
/* Add registers io access driver. */
if (mlxplat_regs_io) {
mlxplat_regs_io->regmap = priv->regmap;
- priv->pdev_io_regs = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-io",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_regs_io,
- sizeof(*mlxplat_regs_io));
+ priv->pdev_io_regs = platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-io",
+ PLATFORM_DEVID_NONE, NULL,
+ 0, mlxplat_regs_io,
+ sizeof(*mlxplat_regs_io));
if (IS_ERR(priv->pdev_io_regs)) {
err = PTR_ERR(priv->pdev_io_regs);
goto fail_platform_led_register;
@@ -4902,11 +5295,10 @@ static int __init mlxplat_init(void)
/* Add FAN driver. */
if (mlxplat_fan) {
mlxplat_fan->regmap = priv->regmap;
- priv->pdev_fan = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-fan",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_fan,
- sizeof(*mlxplat_fan));
+ priv->pdev_fan = platform_device_register_resndata(&mlxplat_dev->dev, "mlxreg-fan",
+ PLATFORM_DEVID_NONE, NULL, 0,
+ mlxplat_fan,
+ sizeof(*mlxplat_fan));
if (IS_ERR(priv->pdev_fan)) {
err = PTR_ERR(priv->pdev_fan);
goto fail_platform_io_regs_register;
@@ -4920,11 +5312,10 @@ static int __init mlxplat_init(void)
for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
if (mlxplat_wd_data[j]) {
mlxplat_wd_data[j]->regmap = priv->regmap;
- priv->pdev_wd[j] = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlx-wdt",
- j, NULL, 0,
- mlxplat_wd_data[j],
- sizeof(*mlxplat_wd_data[j]));
+ priv->pdev_wd[j] =
+ platform_device_register_resndata(&mlxplat_dev->dev, "mlx-wdt", j,
+ NULL, 0, mlxplat_wd_data[j],
+ sizeof(*mlxplat_wd_data[j]));
if (IS_ERR(priv->pdev_wd[j])) {
err = PTR_ERR(priv->pdev_wd[j]);
goto fail_platform_wd_register;
@@ -4949,9 +5340,11 @@ fail_platform_io_regs_register:
if (mlxplat_regs_io)
platform_device_unregister(priv->pdev_io_regs);
fail_platform_led_register:
- platform_device_unregister(priv->pdev_led);
+ if (mlxplat_led)
+ platform_device_unregister(priv->pdev_led);
fail_platform_hotplug_register:
- platform_device_unregister(priv->pdev_hotplug);
+ if (mlxplat_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
fail_platform_mux_register:
while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
@@ -4974,8 +5367,10 @@ static void __exit mlxplat_exit(void)
platform_device_unregister(priv->pdev_fan);
if (priv->pdev_io_regs)
platform_device_unregister(priv->pdev_io_regs);
- platform_device_unregister(priv->pdev_led);
- platform_device_unregister(priv->pdev_hotplug);
+ if (priv->pdev_led)
+ platform_device_unregister(priv->pdev_led);
+ if (priv->pdev_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
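With the hotplug and LED platform data now optional, registration and both unwind paths (error path and module exit) must be guarded by the same NULL test; unregistering a never-registered or IS_ERR() pointer would crash. The idiom, reduced to a sketch with an illustrative pdata pointer:

    if (pdata) {
            pdev = platform_device_register_resndata(parent, "name",
                                                     PLATFORM_DEVID_NONE,
                                                     NULL, 0, pdata,
                                                     sizeof(*pdata));
            if (IS_ERR(pdev))
                    return PTR_ERR(pdev);
    }

    /* ... and symmetrically on every unwind path: */
    if (pdata)
            platform_device_unregister(pdev);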
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
new file mode 100644
index 000000000000..fb2e141f3eb8
--- /dev/null
+++ b/drivers/platform/x86/p2sb.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation.
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Jonathan Yong <jonathan.yong@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/p2sb.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define P2SBC 0xe0
+#define P2SBC_HIDE BIT(8)
+
+static const struct x86_cpu_id p2sb_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, PCI_DEVFN(31, 1)),
+ {}
+};
+
+static int p2sb_get_devfn(unsigned int *devfn)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(p2sb_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ *devfn = (unsigned int)id->driver_data;
+ return 0;
+}
+
+static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+{
+ /* Copy resource from the first BAR of the device in question */
+ *mem = pdev->resource[0];
+ return 0;
+}
+
+static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_scan_single_device(bus, devfn);
+ if (!pdev)
+ return -ENODEV;
+
+ ret = p2sb_read_bar0(pdev, mem);
+
+ pci_stop_and_remove_bus_device(pdev);
+ return ret;
+}
+
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * The BIOS prevents the P2SB device from being enumerated by the PCI
+ * subsystem, so we need to unhide and hide it back to look up the BAR.
+ *
+ * If @bus is NULL, bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Locking is handled by pci_rescan_remove_lock mutex.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct pci_dev *pdev_p2sb;
+ unsigned int devfn_p2sb;
+ u32 value = P2SBC_HIDE;
+ int ret;
+
+ /* Get devfn for P2SB device itself */
+ ret = p2sb_get_devfn(&devfn_p2sb);
+ if (ret)
+ return ret;
+
+ /* if @bus is NULL, use bus 0 in domain 0 */
+ bus = bus ?: pci_find_bus(0, 0);
+
+ /*
+ * Prevent concurrent PCI bus scan from seeing the P2SB device and
+ * removing via sysfs while it is temporarily exposed.
+ */
+ pci_lock_rescan_remove();
+
+ /* Unhide the P2SB device, if needed */
+ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+
+ pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+ if (devfn)
+ ret = p2sb_scan_and_read(bus, devfn, mem);
+ else
+ ret = p2sb_read_bar0(pdev_p2sb, mem);
+ pci_stop_and_remove_bus_device(pdev_p2sb);
+
+ /* Hide the P2SB device, if it was hidden */
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
+
+ pci_unlock_rescan_remove();
+
+ if (ret)
+ return ret;
+
+ if (mem->flags == 0)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(p2sb_bar);
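A consumer that wants the bridge's own BAR passes NULL/0 and gets a filled-in struct resource back; a short usage sketch:

    #include <linux/platform_data/x86/p2sb.h>

    struct resource mem;
    int ret;

    ret = p2sb_bar(NULL, 0, &mem); /* bus 0 in domain 0, P2SB itself */
    if (ret)
            return ret;
    /* mem now describes BAR0; the device itself has been hidden again */

The whole unhide/scan/hide sequence runs under pci_lock_rescan_remove(), so a concurrent bus rescan can never see, and sysfs can never remove, the temporarily exposed device.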
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 615e39cbbbf1..d9a095d2c0eb 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -998,19 +998,23 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pr_err("Couldn't retrieve BIOS data\n");
goto out_input;
}
- /* initialize backlight */
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
- pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
- &pcc_backlight_ops, &props);
- if (IS_ERR(pcc->backlight)) {
- result = PTR_ERR(pcc->backlight);
- goto out_input;
- }
- /* read the initial brightness setting from the hardware */
- pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ /* initialize backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
+
+ pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
+ &pcc_backlight_ops, &props);
+ if (IS_ERR(pcc->backlight)) {
+ result = PTR_ERR(pcc->backlight);
+ goto out_input;
+ }
+
+ /* read the initial brightness setting from the hardware */
+ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ }
/* Reset initial sticky key mode since the hardware register state is not consistent */
acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
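acpi_video_get_backlight_type() reports which interface should own the panel (acpi_backlight_vendor, acpi_backlight_video, or acpi_backlight_native, among others); registering the vendor backlight only in the vendor case avoids exposing two competing backlight devices for the same panel. The gate, as a sketch (the driver wraps the registration in the equivalent if-block):

    #include <acpi/video.h>

    if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
            return 0; /* ACPI video or the GPU native driver handles it */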
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index b8b1ed1406de..154317e9910d 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -389,21 +389,16 @@ static const struct dmi_system_id critclk_systems[] = {
},
},
{
- /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */
- .ident = "Lex 3I380D",
+ /*
+ * Lex System / Lex Computech Co. makes a lot of Bay Trail
+ * based embedded boards which often come with multiple
+ * ethernet controllers using multiple pmc_plt_clks. See:
+ * https://www.lex.com.tw/products/embedded-ipc-board/
+ */
+ .ident = "Lex BayTrail",
.callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
- DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Lex 2I385SW",
- .callback = dmi_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2I385SW"),
},
},
{
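The per-product entries can collapse into one because DMI_MATCH() is a substring match (DMI_EXACT_MATCH() is the strict variant), so matching only the vendor string covers every Lex BayTrail board. The resulting table entry as a complete unit (lex_systems is an illustrative name; in the driver this lives inside critclk_systems):

    static const struct dmi_system_id lex_systems[] = {
            {
                    .ident = "Lex BayTrail",
                    .callback = dmi_callback,
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
                    },
            },
            {}
    };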
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 1e8063b7c169..5362f1a7b77c 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -61,36 +61,35 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
default:
return 0;
}
-
if (ret < 0)
- dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d: %d\n",
- inst->irq_idx, ret);
+ return dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d\n",
+ inst->irq_idx);
return ret;
}
static void smi_devs_unregister(struct smi *smi)
{
- while (smi->i2c_num > 0)
- i2c_unregister_device(smi->i2c_devs[--smi->i2c_num]);
+ while (smi->i2c_num--)
+ i2c_unregister_device(smi->i2c_devs[smi->i2c_num]);
- while (smi->spi_num > 0)
- spi_unregister_device(smi->spi_devs[--smi->spi_num]);
+ while (smi->spi_num--)
+ spi_unregister_device(smi->spi_devs[smi->spi_num]);
}
/**
* smi_spi_probe - Instantiate multiple SPI devices from inst array
* @pdev: Platform device
- * @adev: ACPI device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
* Returns the number of SPI devices instantiated, zero if none are found, or a negative error code.
*/
-static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+static int smi_spi_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct spi_controller *ctlr;
struct spi_device *spi_dev;
char name[50];
@@ -99,8 +98,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
ret = acpi_spi_count_resources(adev);
if (ret < 0)
return ret;
- else if (!ret)
- return -ENODEV;
+ if (!ret)
+ return -ENOENT;
count = ret;
@@ -112,9 +111,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
spi_dev = acpi_spi_device_alloc(NULL, adev, i);
if (IS_ERR(spi_dev)) {
- ret = PTR_ERR(spi_dev);
- dev_err_probe(dev, ret, "failed to allocate SPI device %s from ACPI: %d\n",
- dev_name(&adev->dev), ret);
+ ret = dev_err_probe(dev, PTR_ERR(spi_dev), "failed to allocate SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
goto error;
}
@@ -135,9 +133,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
ret = spi_add_device(spi_dev);
if (ret) {
- dev_err_probe(&ctlr->dev, ret,
- "failed to add SPI device %s from ACPI: %d\n",
- dev_name(&adev->dev), ret);
+ dev_err_probe(&ctlr->dev, ret, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
spi_dev_put(spi_dev);
goto error;
}
@@ -166,25 +163,25 @@ error:
/**
* smi_i2c_probe - Instantiate multiple I2C devices from inst array
* @pdev: Platform device
- * @adev: ACPI device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
* Returns the number of I2C devices instantiated, zero if none are found, or a negative error code.
*/
-static int smi_i2c_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+static int smi_i2c_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct i2c_board_info board_info = {};
struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
char name[32];
int i, ret, count;
ret = i2c_acpi_client_count(adev);
if (ret < 0)
return ret;
- else if (!ret)
- return -ENODEV;
+ if (!ret)
+ return -ENOENT;
count = ret;
@@ -230,12 +227,8 @@ static int smi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct smi_node *node;
- struct acpi_device *adev;
struct smi *smi;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
+ int ret;
node = device_get_match_data(dev);
if (!node) {
@@ -251,19 +244,25 @@ static int smi_probe(struct platform_device *pdev)
switch (node->bus_type) {
case SMI_I2C:
- return smi_i2c_probe(pdev, adev, smi, node->instances);
+ return smi_i2c_probe(pdev, smi, node->instances);
case SMI_SPI:
- return smi_spi_probe(pdev, adev, smi, node->instances);
+ return smi_spi_probe(pdev, smi, node->instances);
case SMI_AUTO_DETECT:
- if (i2c_acpi_client_count(adev) > 0)
- return smi_i2c_probe(pdev, adev, smi, node->instances);
- else
- return smi_spi_probe(pdev, adev, smi, node->instances);
+ /*
+ * For backwards-compatibility with the existing nodes, I2C is
+ * checked first, and if such entries are found ONLY I2C devices
+ * are created. Some existing nodes that were already handled by
+ * this driver also contain unrelated SpiSerialBus nodes that were
+ * previously ignored; creating only I2C devices preserves that
+ * behavior.
+ */
+ ret = smi_i2c_probe(pdev, smi, node->instances);
+ if (ret != -ENOENT)
+ return ret;
+ return smi_spi_probe(pdev, smi, node->instances);
default:
return -EINVAL;
}
-
- return 0; /* never reached */
}
static int smi_remove(struct platform_device *pdev)
@@ -325,10 +324,11 @@ static const struct smi_node cs35l41_hda = {
static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
- { "INT3515", (unsigned long)&int3515_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
+ { "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
+ { "CLSA0101", (unsigned long)&cs35l41_hda },
{ }
};
MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
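dev_err_probe() both logs and returns the error code, which is why the calls can now sit directly in return statements; for -EPROBE_DEFER it records the deferral reason instead of flooding the log. The shape of the pattern (some_lookup() is a stand-in):

    ret = some_lookup(...);
    if (ret < 0)
            return dev_err_probe(&pdev->dev, ret,
                                 "Error requesting irq at index %d\n",
                                 inst->irq_idx);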
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
index b599cda5ba3c..ca3647b751d5 100644
--- a/drivers/platform/x86/simatic-ipc.c
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -51,6 +51,7 @@ static int register_platform_devices(u32 station_id)
{
u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
+ char *pdevname = KBUILD_MODNAME "_leds";
int i;
platform_data.devmode = SIMATIC_IPC_DEVICE_NONE;
@@ -64,10 +65,12 @@ static int register_platform_devices(u32 station_id)
}
if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
+ if (ledmode == SIMATIC_IPC_DEVICE_127E)
+ pdevname = KBUILD_MODNAME "_leds_gpio";
platform_data.devmode = ledmode;
ipc_led_platform_device =
platform_device_register_data(NULL,
- KBUILD_MODNAME "_leds", PLATFORM_DEVID_NONE,
+ pdevname, PLATFORM_DEVID_NONE,
&platform_data,
sizeof(struct simatic_ipc_platform));
if (IS_ERR(ipc_led_platform_device))
@@ -101,44 +104,6 @@ static int register_platform_devices(u32 station_id)
return 0;
}
-/* FIXME: this should eventually be done with generic P2SB discovery code
- * the individual drivers for watchdogs and LEDs access memory that implements
- * GPIO, but pinctrl will not come up because of missing ACPI entries
- *
- * While there is no conflict a cleaner solution would be to somehow bring up
- * pinctrl even with these ACPI entries missing, and base the drivers on pinctrl.
- * After which the following function could be dropped, together with the code
- * poking the memory.
- */
-/*
- * Get membase address from PCI, used in leds and wdt module. Here we read
- * the bar0. The final address calculation is done in the appropriate modules
- */
-u32 simatic_ipc_get_membase0(unsigned int p2sb)
-{
- struct pci_bus *bus;
- u32 bar0 = 0;
- /*
- * The GPIO memory is in bar0 of the hidden P2SB device.
- * Unhide the device to have a quick look at it, before we hide it
- * again.
- * Also grab the pci rescan lock so that device does not get discovered
- * and remapped while it is visible.
- * This code is inspired by drivers/mfd/lpc_ich.c
- */
- bus = pci_find_bus(0, 0);
- pci_lock_rescan_remove();
- pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x0);
- pci_bus_read_config_dword(bus, p2sb, PCI_BASE_ADDRESS_0, &bar0);
-
- bar0 &= ~0xf;
- pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x1);
- pci_unlock_rescan_remove();
-
- return bar0;
-}
-EXPORT_SYMBOL(simatic_ipc_get_membase0);
-
static int __init simatic_ipc_init_module(void)
{
const struct dmi_system_id *match;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d8d0c0bed5e9..07ef05f727a2 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4341,7 +4341,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
{
struct acpi_resource_irq *p = &resource->data.irq;
struct sony_pic_irq *interrupt = NULL;
- if (!p || !p->interrupt_count) {
+ if (!p->interrupt_count) {
/*
* IRQ descriptors may have no IRQ# bits set,
* particularly those w/ _STA disabled
@@ -4374,11 +4374,6 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
struct acpi_resource_io *io = &resource->data.io;
struct sony_pic_ioport *ioport =
list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
- if (!io) {
- dprintk("Blank IO resource\n");
- return AE_OK;
- }
-
if (!ioport->io1.minimum) {
memcpy(&ioport->io1, io, sizeof(*io));
dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index 7299ad08c838..958df41ad509 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -339,7 +339,7 @@ static ssize_t kb_led_color_show(
struct led_classdev *led;
struct system76_data *data;
- led = (struct led_classdev *)dev->driver_data;
+ led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
return sysfs_emit(buf, "%06X\n", data->kb_color);
}
@@ -356,7 +356,7 @@ static ssize_t kb_led_color_store(
unsigned int val;
int ret;
- led = (struct led_classdev *)dev->driver_data;
+ led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
ret = kstrtouint(buf, 16, &val);
if (ret)
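dev_get_drvdata() is the accessor for the field the old cast reached into by hand; paired with container_of() it recovers the driver structure without depending on the layout of struct device:

    struct led_classdev *led = dev_get_drvdata(dev);
    struct system76_data *data = container_of(led, struct system76_data,
                                              kb_led);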
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 502dcd1c33b7..22d4e8633e30 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -34,46 +34,51 @@
* thanks to Chris Wright <chrisw@osdl.org>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/sched/signal.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/nvram.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/sysfs.h>
+#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/fb.h>
-#include <linux/platform_device.h>
+#include <linux/freezer.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
#include <linux/input.h>
-#include <linux/leds.h>
-#include <linux/rfkill.h>
-#include <linux/dmi.h>
#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvram.h>
#include <linux/pci.h>
-#include <linux/power_supply.h>
+#include <linux/platform_device.h>
#include <linux/platform_profile.h>
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/initval.h>
+#include <linux/power_supply.h>
+#include <linux/proc_fs.h>
+#include <linux/rfkill.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
#include <acpi/battery.h>
#include <acpi/video.h>
+
#include <drm/drm_privacy_screen_driver.h>
+
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+
#include "dual_accel_detect.h"
/* ThinkPad CMOS commands */
@@ -159,6 +164,7 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
+ TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -257,8 +263,6 @@ enum tpacpi_hkey_event_t {
#define TPACPI_DBG_BRGHT 0x0020
#define TPACPI_DBG_MIXER 0x0040
-#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
-#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
@@ -1312,9 +1316,7 @@ static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, struct seq_file *
return status;
}
- seq_printf(m, "status:\t\t%s\n",
- (status == TPACPI_RFK_RADIO_ON) ?
- "enabled" : "disabled");
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status == TPACPI_RFK_RADIO_ON));
seq_printf(m, "commands:\tenable, disable\n");
}
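str_enabled_disabled(), str_enable_disable() and str_on_off() come from <linux/string_helpers.h> and replace the driver-local enabled()/onoff() macros; they are, roughly:

    /* paraphrased from include/linux/string_helpers.h */
    static inline const char *str_enabled_disabled(bool v)
    {
            return v ? "enabled" : "disabled";
    }

    static inline const char *str_on_off(bool v)
    {
            return v ? "on" : "off";
    }

Passing status & BIT(0) rather than a raw bit index also makes each call site explicit about which flag it reports.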
@@ -1341,8 +1343,7 @@ static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
if (status != -1) {
tpacpi_disclose_usertask("procfs", "attempt to %s %s\n",
- (status == TPACPI_RFK_RADIO_ON) ?
- "enable" : "disable",
+ str_enable_disable(status == TPACPI_RFK_RADIO_ON),
tpacpi_rfkill_names[id]);
res = (tpacpi_rfkill_switches[id]->ops->set_status)(status);
tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
@@ -3499,8 +3500,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!status;
- pr_info("radio switch found; radios are %s\n",
- enabled(status, 0));
+ pr_info("radio switch found; radios are %s\n", str_enabled_disabled(status & BIT(0)));
}
tabletsw_state = hotkey_init_tablet_mode();
@@ -3735,6 +3735,7 @@ static bool hotkey_notify_extended_hotkey(const u32 hkey)
switch (hkey) {
case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
+ case TP_HKEY_EV_AMT_TOGGLE:
tpacpi_driver_event(hkey);
return true;
}
@@ -4159,7 +4160,7 @@ static int hotkey_read(struct seq_file *m)
if (res)
return res;
- seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
if (hotkey_all_mask) {
seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
@@ -4292,9 +4293,8 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s bluetooth\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s bluetooth\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul) {
@@ -4659,9 +4659,8 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s wwan\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s wwan\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul) {
@@ -4837,9 +4836,8 @@ static int uwb_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s UWB\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s UWB\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul) {
@@ -5193,11 +5191,11 @@ static int video_read(struct seq_file *m)
return autosw;
seq_printf(m, "status:\t\tsupported\n");
- seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
- seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
+ seq_printf(m, "lcd:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
+ seq_printf(m, "crt:\t\t%s\n", str_enabled_disabled(status & BIT(1)));
if (video_supported == TPACPI_VIDEO_NEW)
- seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
- seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
+ seq_printf(m, "dvi:\t\t%s\n", str_enabled_disabled(status & BIT(3)));
+ seq_printf(m, "auto:\t\t%s\n", str_enabled_disabled(autosw & BIT(0)));
seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
if (video_supported == TPACPI_VIDEO_NEW)
@@ -5628,7 +5626,7 @@ static int light_read(struct seq_file *m)
status = light_get_status();
if (status < 0)
return status;
- seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
+ seq_printf(m, "status:\t\t%s\n", str_on_off(status & BIT(0)));
seq_printf(m, "commands:\ton, off\n");
}
@@ -6084,9 +6082,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
return 0;
}
-#define str_led_status(s) \
- ((s) == TPACPI_LED_OFF ? "off" : \
- ((s) == TPACPI_LED_ON ? "on" : "blinking"))
+#define str_led_status(s) ((s) >= TPACPI_LED_BLINK ? "blinking" : str_on_off(s))
static int led_read(struct seq_file *m)
{
@@ -6103,8 +6099,7 @@ static int led_read(struct seq_file *m)
status = led_get_status(i);
if (status < 0)
return -EIO;
- seq_printf(m, "%d:\t\t%s\n",
- i, str_led_status(status));
+ seq_printf(m, "%d:\t\t%s\n", i, str_led_status(status));
}
}
@@ -6797,10 +6792,7 @@ static int brightness_set(unsigned int value)
static int brightness_update_status(struct backlight_device *bd)
{
- unsigned int level =
- (bd->props.fb_blank == FB_BLANK_UNBLANK &&
- bd->props.power == FB_BLANK_UNBLANK) ?
- bd->props.brightness : 0;
+ int level = backlight_get_brightness(bd);
dbg_printk(TPACPI_DBG_BRGHT,
"backlight: attempt to set level to %d\n",
@@ -7830,8 +7822,7 @@ static int volume_read(struct seq_file *m)
seq_printf(m, "level:\t\t%d\n",
status & TP_EC_AUDIO_LVL_MSK);
- seq_printf(m, "mute:\t\t%s\n",
- onoff(status, TP_EC_AUDIO_MUTESW));
+ seq_printf(m, "mute:\t\t%s\n", str_on_off(status & BIT(TP_EC_AUDIO_MUTESW)));
if (volume_control_allowed) {
seq_printf(m, "commands:\tunmute, mute\n");
@@ -9060,7 +9051,7 @@ static int fan_read(struct seq_file *m)
seq_printf(m, "status:\t\t%s\n"
"level:\t\t%d\n",
- (status != 0) ? "enabled" : "disabled", status);
+ str_enabled_disabled(status), status);
break;
case TPACPI_FAN_RD_TPEC:
@@ -9069,8 +9060,7 @@ static int fan_read(struct seq_file *m)
if (rc)
return rc;
- seq_printf(m, "status:\t\t%s\n",
- (status != 0) ? "enabled" : "disabled");
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status));
rc = fan_get_speed(&speed);
if (rc < 0)
@@ -10268,6 +10258,7 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */
#define DYTC_FC_MMC 27 /* MMC Mode supported */
#define DYTC_FC_PSC 29 /* PSC Mode supported */
+#define DYTC_FC_AMT 31 /* AMT mode supported */
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
@@ -10280,6 +10271,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_FUNCTION_CQL 1 /* Function = 1, lap mode */
#define DYTC_FUNCTION_MMC 11 /* Function = 11, MMC mode */
#define DYTC_FUNCTION_PSC 13 /* Function = 13, PSC mode */
+#define DYTC_FUNCTION_AMT 15 /* Function = 15, AMT mode */
+
+#define DYTC_MODE_AMT_ENABLE 0x1 /* Enable AMT (in balanced mode) */
+#define DYTC_MODE_AMT_DISABLE 0xF /* Disable AMT (in other modes) */
#define DYTC_MODE_MMC_PERFORM 2 /* High power mode aka performance */
#define DYTC_MODE_MMC_LOWPOWER 3 /* Low power mode */
@@ -10300,6 +10295,8 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 0)
#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 1)
+static int dytc_control_amt(bool enable);
+static bool dytc_amt_active;
static enum platform_profile_option dytc_current_profile;
static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
@@ -10382,6 +10379,30 @@ static int dytc_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int dytc_control_amt(bool enable)
+{
+ int dummy;
+ int err;
+ int cmd;
+
+ if (!(dytc_capabilities & BIT(DYTC_FC_AMT))) {
+ pr_warn("Attempting to toggle AMT on a system that doesn't advertise support\n");
+ return -ENODEV;
+ }
+
+ if (enable)
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_ENABLE, enable);
+ else
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_DISABLE, enable);
+
+ pr_debug("%sabling AMT (cmd 0x%x)\n", enable ? "en" : "dis", cmd);
+ err = dytc_command(cmd, &dummy);
+ if (err)
+ return err;
+ dytc_amt_active = enable;
+ return 0;
+}
+
/*
* Helper function - check if we are in CQL mode and if we are
* - disable CQL,
@@ -10464,6 +10485,9 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
if (err)
goto unlock;
+ /* If the system supports AMT, activate it only for the balanced profile */
+ if (dytc_capabilities & BIT(DYTC_FC_AMT))
+ dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
}
/* Success - update current profile */
dytc_current_profile = profile;
@@ -10568,6 +10592,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
/* Ensure initial values are correct */
dytc_profile_refresh();
+ /* Set AMT correctly now we know current profile */
+ if ((dytc_capabilities & BIT(DYTC_FC_PSC)) &&
+ (dytc_capabilities & BIT(DYTC_FC_AMT)))
+ dytc_control_amt(dytc_current_profile == PLATFORM_PROFILE_BALANCED);
+
return 0;
}
@@ -11011,6 +11040,15 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
if (changed)
drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
}
+ if (hkey_event == TP_HKEY_EV_AMT_TOGGLE) {
+ /* If we're enabling AMT we need to force balanced mode */
+ if (!dytc_amt_active)
+ /* This will also set AMT mode enabled */
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
+ else
+ dytc_control_amt(!dytc_amt_active);
+ }
+
}
static void hotkey_driver_event(const unsigned int scancode)
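
Review note on the AMT plumbing above: DYTC_SET_COMMAND() itself is not shown in this hunk, but the two AMT commands are just bit-packed DYTC requests built from the constants added here. A minimal userspace sketch, assuming the driver's usual layout — command in the low byte, function at bit 12, mode at bit 16, valid flag at bit 20; the shift values are assumptions, not taken from this diff:

#include <stdio.h>

/* Assumed DYTC_SET_* layout; verify against the full thinkpad_acpi.c */
#define DYTC_CMD_SET           1U
#define DYTC_SET_FUNCTION_BIT  12
#define DYTC_SET_MODE_BIT      16
#define DYTC_SET_VALID_BIT     20

#define DYTC_SET_COMMAND(func, mode, on) \
	(DYTC_CMD_SET | (func) << DYTC_SET_FUNCTION_BIT | \
	 (mode) << DYTC_SET_MODE_BIT | (on) << DYTC_SET_VALID_BIT)

int main(void)
{
	/* Values taken from the hunk: function 15, enable 0x1, disable 0xF */
	printf("enable AMT:  0x%x\n", DYTC_SET_COMMAND(15U, 0x1U, 1U));
	printf("disable AMT: 0x%x\n", DYTC_SET_COMMAND(15U, 0xFU, 0U));
	return 0;
}
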
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 2fa0f7d55259..8f7695624c8c 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -17,6 +17,7 @@
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/pci.h>
+#include <linux/libata.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -322,8 +323,8 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
* treat the compatibility IRQs as busy.
*/
if ((progif & 0x5) != 0x5)
- if (pci_get_legacy_ide_irq(pci, 0) == irq ||
- pci_get_legacy_ide_irq(pci, 1) == irq) {
+ if (ATA_PRIMARY_IRQ(pci) == irq ||
+ ATA_SECONDARY_IRQ(pci) == irq) {
pnp_dbg(&pnp->dev, " legacy IDE device %s "
"using irq %d\n", pci_name(pci), irq);
return 1;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 4b563db3ab3e..a8c46ba5878f 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -297,4 +297,10 @@ config NVMEM_REBOOT_MODE
then the bootloader can read it and take different
action according to the mode.
+config POWER_MLXBF
+ tristate "Mellanox BlueField power handling driver"
+ depends on (GPIO_MLXBF2 && ACPI)
+ help
+ This driver supports reset or low power mode handling for Mellanox BlueField.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index f606a2f60539..0a39424fc558 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
obj-$(CONFIG_POWER_RESET_SC27XX) += sc27xx-poweroff.o
obj-$(CONFIG_NVMEM_REBOOT_MODE) += nvmem-reboot-mode.o
+obj-$(CONFIG_POWER_MLXBF) += pwr-mlxbf.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 64def79d557a..741e44a017c3 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -17,10 +17,13 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
+#include <linux/reset-controller.h>
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
+#include <dt-bindings/reset/sama7g5-reset.h>
+
#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */
#define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */
#define AT91_RSTC_PERRST BIT(2) /* Peripheral Reset */
@@ -39,6 +42,17 @@
#define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */
#define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */
+/**
+ * enum reset_type - reset types
+ * @RESET_TYPE_GENERAL: first power-up reset
+ * @RESET_TYPE_WAKEUP: return from backup mode
+ * @RESET_TYPE_WATCHDOG: watchdog fault
+ * @RESET_TYPE_SOFTWARE: processor reset required by software
+ * @RESET_TYPE_USER: NRST pin detected low
+ * @RESET_TYPE_CPU_FAIL: CPU clock failure detection
+ * @RESET_TYPE_XTAL_FAIL: 32 kHz crystal failure detection fault
+ * @RESET_TYPE_ULP2: ULP2 reset
+ */
enum reset_type {
RESET_TYPE_GENERAL = 0,
RESET_TYPE_WAKEUP = 1,
@@ -50,15 +64,48 @@ enum reset_type {
RESET_TYPE_ULP2 = 8,
};
+/**
+ * struct at91_reset - AT91 reset specific data structure
+ * @rstc_base: base address for system reset
+ * @ramc_base: array with base addresses of RAM controllers
+ * @dev_base: base address for devices reset
+ * @sclk: slow clock
+ * @data: platform specific reset data
+ * @rcdev: reset controller device
+ * @lock: lock for devices reset register access
+ * @nb: reset notifier block
+ * @args: SoC specific system reset arguments
+ * @ramc_lpr: SDRAM Controller Low Power Register
+ */
struct at91_reset {
void __iomem *rstc_base;
void __iomem *ramc_base[2];
+ void __iomem *dev_base;
struct clk *sclk;
+ const struct at91_reset_data *data;
+ struct reset_controller_dev rcdev;
+ spinlock_t lock;
struct notifier_block nb;
u32 args;
u32 ramc_lpr;
};
+#define to_at91_reset(r) container_of(r, struct at91_reset, rcdev)
+
+/**
+ * struct at91_reset_data - AT91 reset data
+ * @reset_args: SoC specific system reset arguments
+ * @n_device_reset: number of device resets
+ * @device_reset_min_id: min id for device reset
+ * @device_reset_max_id: max id for device reset
+ */
+struct at91_reset_data {
+ u32 reset_args;
+ u32 n_device_reset;
+ u8 device_reset_min_id;
+ u8 device_reset_max_id;
+};
+
/*
* unless the SDRAM is cleanly shutdown before we hit the
* reset register it can be left driving the data bus and
@@ -95,7 +142,7 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
"r" (reset->rstc_base),
"r" (1),
"r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
- "r" (reset->args),
+ "r" (reset->data->reset_args),
"r" (reset->ramc_lpr)
: "r4");
@@ -153,34 +200,133 @@ static const struct of_device_id at91_ramc_of_match[] = {
{ /* sentinel */ }
};
+static const struct at91_reset_data sam9260 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data samx7 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data sama7g5 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+ .n_device_reset = 3,
+ .device_reset_min_id = SAMA7G5_RESET_USB_PHY1,
+ .device_reset_max_id = SAMA7G5_RESET_USB_PHY3,
+};
+
static const struct of_device_id at91_reset_of_match[] = {
{
.compatible = "atmel,at91sam9260-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST),
+ .data = &sam9260,
},
{
.compatible = "atmel,at91sam9g45-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,sama5d3-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,samx7-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
},
{
.compatible = "microchip,sam9x60-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
+ },
+ {
+ .compatible = "microchip,sama7g5-rstc",
+ .data = &sama7g5,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_reset_of_match);
+static int at91_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&reset->lock, flags);
+ val = readl_relaxed(reset->dev_base);
+ if (assert)
+ val |= BIT(id);
+ else
+ val &= ~BIT(id);
+ writel_relaxed(val, reset->dev_base);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int at91_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, true);
+}
+
+static int at91_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, false);
+}
+
+static int at91_reset_dev_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ u32 val;
+
+ val = readl_relaxed(reset->dev_base);
+
+ return !!(val & BIT(id));
+}
+
+static const struct reset_control_ops at91_reset_ops = {
+ .assert = at91_reset_assert,
+ .deassert = at91_reset_deassert,
+ .status = at91_reset_dev_status,
+};
+
+static int at91_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+
+ if (!reset->data->n_device_reset ||
+ (reset_spec->args[0] < reset->data->device_reset_min_id ||
+ reset_spec->args[0] > reset->data->device_reset_max_id))
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
+
+static int at91_rcdev_init(struct at91_reset *reset,
+ struct platform_device *pdev)
+{
+ if (!reset->data->n_device_reset)
+ return 0;
+
+ reset->dev_base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 1,
+ NULL);
+ if (IS_ERR(reset->dev_base))
+ return -ENODEV;
+
+ spin_lock_init(&reset->lock);
+ reset->rcdev.ops = &at91_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.nr_resets = reset->data->n_device_reset;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->rcdev.of_xlate = at91_reset_of_xlate;
+
+ return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+}
+
static int __init at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -212,10 +358,12 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
}
- match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
+ reset->data = device_get_match_data(&pdev->dev);
+ if (!reset->data)
+ return -ENODEV;
+
reset->nb.notifier_call = at91_reset;
reset->nb.priority = 192;
- reset->args = (u32)match->data;
reset->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(reset->sclk))
@@ -229,6 +377,10 @@ static int __init at91_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, reset);
+ ret = at91_rcdev_init(reset, pdev);
+ if (ret)
+ goto disable_clk;
+
if (of_device_is_compatible(pdev->dev.of_node, "microchip,sam9x60-rstc")) {
u32 val = readl(reset->rstc_base + AT91_RSTC_MR);
@@ -237,14 +389,16 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
ret = register_restart_handler(&reset->nb);
- if (ret) {
- clk_disable_unprepare(reset->sclk);
- return ret;
- }
+ if (ret)
+ goto disable_clk;
at91_reset_status(pdev, reset->rstc_base);
return 0;
+
+disable_clk:
+ clk_disable_unprepare(reset->sclk);
+ return ret;
}
static int __exit at91_reset_remove(struct platform_device *pdev)
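
The probe conversion above swaps of_match_node() plus a u32 cast for device_get_match_data() and a typed struct. A minimal sketch of the resulting pattern (names here are illustrative, not from the driver):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct my_match_data {
	u32 reset_args;
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_match_data *data;

	/* Typed match data; also works for ACPI enumeration, unlike
	 * of_match_node() */
	data = device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	dev_dbg(&pdev->dev, "reset args 0x%x\n", data->reset_args);
	return 0;
}
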
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
new file mode 100644
index 000000000000..12dedf841a44
--- /dev/null
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/*
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/types.h>
+
+struct pwr_mlxbf {
+ struct work_struct send_work;
+ const char *hid;
+};
+
+static void pwr_mlxbf_send_work(struct work_struct *work)
+{
+ acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
+}
+
+static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
+{
+ const char *rst_pwr_hid = "MLNXBF24";
+ const char *low_pwr_hid = "MLNXBF29";
+ struct pwr_mlxbf *priv = ptr;
+
+ if (!strncmp(priv->hid, rst_pwr_hid, 8))
+ emergency_restart();
+
+ if (!strncmp(priv->hid, low_pwr_hid, 8))
+ schedule_work(&priv->send_work);
+
+ return IRQ_HANDLED;
+}
+
+static int pwr_mlxbf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ struct pwr_mlxbf *priv;
+ const char *hid;
+ int irq, err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENXIO;
+
+ hid = acpi_device_hid(adev);
+ priv->hid = hid;
+
+ irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
+
+ err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev, irq, pwr_mlxbf_irq, 0, hid, priv);
+ if (err)
+ dev_err(dev, "Failed to request %s irq\n", priv->hid);
+
+ return err;
+}
+
+static const struct acpi_device_id __maybe_unused pwr_mlxbf_acpi_match[] = {
+ { "MLNXBF24", 0 },
+ { "MLNXBF29", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, pwr_mlxbf_acpi_match);
+
+static struct platform_driver pwr_mlxbf_driver = {
+ .driver = {
+ .name = "pwr_mlxbf",
+ .acpi_match_table = pwr_mlxbf_acpi_match,
+ },
+ .probe = pwr_mlxbf_probe,
+};
+
+module_platform_driver(pwr_mlxbf_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField power driver");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/power/supply/ab8500-chargalg.h b/drivers/power/supply/ab8500-chargalg.h
index f47a0061c36a..8534d067ba95 100644
--- a/drivers/power/supply/ab8500-chargalg.h
+++ b/drivers/power/supply/ab8500-chargalg.h
@@ -34,7 +34,6 @@ struct ux500_charger_ops {
* @max_out_volt_uv maximum output charger voltage in uV
* @max_out_curr_ua maximum output charger current in uA
* @enabled indicates if this charger is used or not
- * @external external charger unit (pm2xxx)
*/
struct ux500_charger {
struct power_supply *psy;
@@ -43,9 +42,6 @@ struct ux500_charger {
int max_out_curr_ua;
int wdt_refresh;
bool enabled;
- bool external;
};
-extern struct blocking_notifier_head charger_notifier_list;
-
#endif /* _AB8500_CHARGALG_H_ */
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index b7e842dff567..863fabe05bdc 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -697,7 +697,6 @@ static void ab8500_btemp_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->btemp_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_btemp_component_ops = {
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 431bbc352d1b..ae4be553f424 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -246,9 +246,6 @@ struct ab8500_chargalg {
struct kobject chargalg_kobject;
};
-/*External charger prepare notifier*/
-BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
-
/* Main battery properties */
static enum power_supply_property ab8500_chargalg_props[] = {
POWER_SUPPLY_PROP_STATUS,
@@ -343,8 +340,7 @@ static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
return di->usb_chg->ops.check_enable(di->usb_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
- } else if ((di->chg_info.charger_type & AC_CHG) &&
- !(di->ac_chg->external)) {
+ } else if (di->chg_info.charger_type & AC_CHG) {
return di->ac_chg->ops.check_enable(di->ac_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
@@ -473,15 +469,6 @@ static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
/* Check if charger exists and kick watchdog if charging */
if (di->ac_chg && di->ac_chg->ops.kick_wd &&
di->chg_info.online_chg & AC_CHG) {
- /*
- * If AB charger watchdog expired, pm2xxx charging
- * gets disabled. To be safe, kick both AB charger watchdog
- * and pm2xxx watchdog.
- */
- if (di->ac_chg->external &&
- di->usb_chg && di->usb_chg->ops.kick_wd)
- di->usb_chg->ops.kick_wd(di->usb_chg);
-
return di->ac_chg->ops.kick_wd(di->ac_chg);
} else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
di->chg_info.online_chg & USB_CHG)
@@ -517,14 +504,6 @@ static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
di->chg_info.ac_iset_ua = iset_ua;
di->chg_info.ac_vset_uv = vset_uv;
- /* Enable external charger */
- if (enable && di->ac_chg->external &&
- !ab8500_chargalg_ex_ac_enable_toggle) {
- blocking_notifier_call_chain(&charger_notifier_list,
- 0, di->dev);
- ab8500_chargalg_ex_ac_enable_toggle++;
- }
-
return di->ac_chg->ops.enable(di->ac_chg, enable, vset_uv, iset_ua);
}
@@ -1217,6 +1196,34 @@ static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
}
/**
+ * ab8500_chargalg_time_to_restart() - time to restart CC/CV charging?
+ * @di: charging algorithm state
+ *
+ * This checks if the voltage or capacity of the battery has fallen so
+ * low that we need to restart the CC/CV charge cycle.
+ */
+static bool ab8500_chargalg_time_to_restart(struct ab8500_chargalg *di)
+{
+ struct power_supply_battery_info *bi = di->bm->bi;
+
+ /* Sanity check - these need to have some reasonable values */
+ if (!di->batt_data.volt_uv || !di->batt_data.percent)
+ return false;
+
+ /* Some batteries tell us at which voltage we should restart charging */
+ if (bi->charge_restart_voltage_uv > 0) {
+ if (di->batt_data.volt_uv <= bi->charge_restart_voltage_uv)
+ return true;
+ /* Else we restart as we reach a certain capacity */
+ } else {
+ if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ab8500_chargalg_algorithm() - Main function for the algorithm
* @di: pointer to the ab8500_chargalg structure
*
@@ -1459,7 +1466,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
fallthrough;
case STATE_WAIT_FOR_RECHARGE:
- if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ if (ab8500_chargalg_time_to_restart(di))
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
break;
@@ -1486,6 +1493,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state A - battery getting old?\n");
+ }
break;
case STATE_MAINTENANCE_B_INIT:
@@ -1510,6 +1525,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state B - battery getting old?\n");
+ }
break;
case STATE_TEMP_LOWHIGH_INIT:
@@ -1746,7 +1769,6 @@ static void ab8500_chargalg_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->chargalg_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_chargalg_component_ops = {
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index d04d087caa50..c19c50442761 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1716,29 +1716,6 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
return ret;
}
-static int ab8500_external_charger_prepare(struct notifier_block *charger_nb,
- unsigned long event, void *data)
-{
- int ret;
- struct device *dev = data;
- /*Toggle External charger control pin*/
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_DISABLE_REG_VAL);
- if (ret < 0) {
- dev_err(dev, "write reg failed %d\n", ret);
- goto out;
- }
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_ENABLE_REG_VAL);
- if (ret < 0)
- dev_err(dev, "Write reg failed %d\n", ret);
-
-out:
- return ret;
-}
-
/**
* ab8500_charger_usb_check_enable() - enable usb charging
* @charger: pointer to the ux500_charger structure
@@ -3316,10 +3293,6 @@ static int __maybe_unused ab8500_charger_suspend(struct device *dev)
return 0;
}
-static struct notifier_block charger_nb = {
- .notifier_call = ab8500_external_charger_prepare,
-};
-
static char *supply_interface[] = {
"ab8500_chargalg",
"ab8500_fg",
@@ -3378,6 +3351,7 @@ static int ab8500_charger_bind(struct device *dev)
ret = component_bind_all(dev, di);
if (ret) {
dev_err(dev, "can't bind component devices\n");
+ destroy_workqueue(di->charger_wq);
return ret;
}
@@ -3404,8 +3378,6 @@ static void ab8500_charger_unbind(struct device *dev)
/* Delete the work queue */
destroy_workqueue(di->charger_wq);
- flush_scheduled_work();
-
/* Unbind fg, btemp, algorithm */
component_unbind_all(dev, di);
}
@@ -3540,7 +3512,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
*/
if (!is_ab8505(di->parent))
di->ac_chg.enabled = true;
- di->ac_chg.external = false;
/* USB supply */
/* ux500_charger sub-class */
@@ -3553,7 +3524,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.max_out_curr_ua =
ab8500_charge_output_curr_map[ARRAY_SIZE(ab8500_charge_output_curr_map) - 1];
di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
- di->usb_chg.external = false;
di->usb_state.usb_current_ua = -1;
mutex_init(&di->charger_attached_mutex);
@@ -3677,17 +3647,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto remove_ab8500_bm;
}
- /* Notifier for external charger enabling */
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_register(
- &charger_notifier_list, &charger_nb);
-
-
di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(di->usb_phy)) {
dev_err(dev, "failed to get usb transceiver\n");
ret = -EINVAL;
- goto out_charger_notifier;
+ goto remove_ab8500_bm;
}
di->nb.notifier_call = ab8500_charger_usb_notifier_call;
ret = usb_register_notifier(di->usb_phy, &di->nb);
@@ -3696,7 +3660,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto put_usb_phy;
}
-
ret = component_master_add_with_match(&pdev->dev,
&ab8500_charger_comp_ops,
match);
@@ -3711,10 +3674,6 @@ free_notifier:
usb_unregister_notifier(di->usb_phy, &di->nb);
put_usb_phy:
usb_put_phy(di->usb_phy);
-out_charger_notifier:
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
remove_ab8500_bm:
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
return ret;
@@ -3729,9 +3688,6 @@ static int ab8500_charger_remove(struct platform_device *pdev)
usb_unregister_notifier(di->usb_phy, &di->nb);
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
usb_put_phy(di->usb_phy);
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
return 0;
}
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 4339fa9ff009..c6c9804280db 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -412,7 +412,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
* ab8500_fg_clear_cap_samples() - Clear average filter
* @di: pointer to the ab8500_fg structure
*
- * The capacity filter is is reset to zero.
+ * The capacity filter is reset to zero.
*/
static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
{
@@ -3234,7 +3234,6 @@ static int ab8500_fg_remove(struct platform_device *pdev)
struct ab8500_fg *di = platform_get_drvdata(pdev);
destroy_workqueue(di->fg_wq);
- flush_scheduled_work();
component_del(&pdev->dev, &ab8500_fg_component_ops);
list_del(&di->node);
ab8500_fg_sysfs_exit(di);
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 96cb3290bcaa..ecba9ab86faf 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -287,7 +287,7 @@ static int bq24257_set_input_current_limit(struct bq24257_device *bq,
{
/*
* Address the case where the user manually sets an input current limit
- * while the charger auto-detection mechanism is is active. In this
+ * while the charger auto-detection mechanism is active. In this
* case we want to abort and go straight to the user-specified value.
*/
if (bq->iilimit_autoset_enable)
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
index 9fe6d826148d..1379afd9698d 100644
--- a/drivers/power/supply/cros_peripheral_charger.c
+++ b/drivers/power/supply/cros_peripheral_charger.c
@@ -63,7 +63,7 @@ static int cros_pchg_ec_command(const struct charger_data *charger,
struct cros_ec_command *msg;
int ret;
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
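
The struct_size() conversion above matters because cros_ec_command carries a flexible data[] payload; an open-coded sizeof-plus-size can wrap on overflow, while struct_size() checks for it. A standalone sketch of the shape involved (the struct here is a simplified stand-in, not the real cros_ec_command):

#include <stdio.h>
#include <stdlib.h>

struct msg_like {
	unsigned int outsize;
	unsigned int insize;
	unsigned char data[];	/* flexible array member */
};

int main(void)
{
	size_t out = 16, in = 64;
	size_t payload = out > in ? out : in;

	/* Kernel struct_size(msg, data, payload) computes this same
	 * sizeof(*msg) + payload * sizeof(msg->data[0]), but with
	 * overflow checking instead of silent wraparound. */
	struct msg_like *msg = calloc(1, sizeof(*msg) + payload);
	if (!msg)
		return 1;
	msg->outsize = out;
	msg->insize = in;
	printf("allocated %zu bytes\n", sizeof(*msg) + payload);
	free(msg);
	return 0;
}
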
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index bf1754355c9f..a58d713d75ce 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -221,10 +221,8 @@ static int goldfish_battery_probe(struct platform_device *pdev)
}
data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
+ if (data->irq < 0)
return -ENODEV;
- }
ret = devm_request_irq(&pdev->dev, data->irq,
goldfish_battery_interrupt,
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 397e5a03b7d9..56c57529c228 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -376,7 +376,7 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
return 0;
}
- /* settting charging parameters */
+ /* setting charging parameters */
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index 8b6c8cfa7503..4fed74511931 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -3,7 +3,7 @@
* max77976_charger.c - Driver for the Maxim MAX77976 battery charger
*
* Copyright (C) 2021 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -504,6 +504,6 @@ static struct i2c_driver max77976_driver = {
};
module_i2c_driver(max77976_driver);
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_DESCRIPTION("Maxim MAX77976 charger driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index e0476ec06601..a5da20ffd685 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -635,6 +635,7 @@ static int olpc_battery_probe(struct platform_device *pdev)
struct power_supply_config bat_psy_cfg = {};
struct power_supply_config ac_psy_cfg = {};
struct olpc_battery_data *data;
+ struct device_node *np;
uint8_t status;
uint8_t ecver;
int ret;
@@ -649,7 +650,9 @@ static int olpc_battery_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec")) {
+ np = of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec");
+ if (np) {
+ of_node_put(np);
/* XO 1.75 */
data->new_proto = true;
data->little_endian = true;
diff --git a/drivers/power/supply/pm2301_charger.h b/drivers/power/supply/pm2301_charger.h
deleted file mode 100644
index 74397e377982..000000000000
--- a/drivers/power/supply/pm2301_charger.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2012
- *
- * PM2301 power supply interface
- */
-
-#ifndef PM2301_CHARGER_H
-#define PM2301_CHARGER_H
-
-/* Watchdog timeout constant */
-#define WD_TIMER 0x30 /* 4min */
-#define WD_KICK_INTERVAL (30 * HZ)
-
-#define PM2XXX_NUM_INT_REG 0x6
-
-/* Constant voltage/current */
-#define PM2XXX_CONST_CURR 0x0
-#define PM2XXX_CONST_VOLT 0x1
-
-/* Lowest charger voltage is 3.39V -> 0x4E */
-#define LOW_VOLT_REG 0x4E
-
-#define PM2XXX_BATT_CTRL_REG1 0x00
-#define PM2XXX_BATT_CTRL_REG2 0x01
-#define PM2XXX_BATT_CTRL_REG3 0x02
-#define PM2XXX_BATT_CTRL_REG4 0x03
-#define PM2XXX_BATT_CTRL_REG5 0x04
-#define PM2XXX_BATT_CTRL_REG6 0x05
-#define PM2XXX_BATT_CTRL_REG7 0x06
-#define PM2XXX_BATT_CTRL_REG8 0x07
-#define PM2XXX_NTC_CTRL_REG1 0x08
-#define PM2XXX_NTC_CTRL_REG2 0x09
-#define PM2XXX_BATT_CTRL_REG9 0x0A
-#define PM2XXX_BATT_STAT_REG1 0x0B
-#define PM2XXX_INP_VOLT_VPWR2 0x11
-#define PM2XXX_INP_DROP_VPWR2 0x13
-#define PM2XXX_INP_VOLT_VPWR1 0x15
-#define PM2XXX_INP_DROP_VPWR1 0x17
-#define PM2XXX_INP_MODE_VPWR 0x18
-#define PM2XXX_BATT_WD_KICK 0x70
-#define PM2XXX_DEV_VER_STAT 0x0C
-#define PM2XXX_THERM_WARN_CTRL_REG 0x20
-#define PM2XXX_BATT_DISC_REG 0x21
-#define PM2XXX_BATT_LOW_LEV_COMP_REG 0x22
-#define PM2XXX_BATT_LOW_LEV_VAL_REG 0x23
-#define PM2XXX_I2C_PAD_CTRL_REG 0x24
-#define PM2XXX_SW_CTRL_REG 0x26
-#define PM2XXX_LED_CTRL_REG 0x28
-
-#define PM2XXX_REG_INT1 0x40
-#define PM2XXX_MASK_REG_INT1 0x50
-#define PM2XXX_SRCE_REG_INT1 0x60
-#define PM2XXX_REG_INT2 0x41
-#define PM2XXX_MASK_REG_INT2 0x51
-#define PM2XXX_SRCE_REG_INT2 0x61
-#define PM2XXX_REG_INT3 0x42
-#define PM2XXX_MASK_REG_INT3 0x52
-#define PM2XXX_SRCE_REG_INT3 0x62
-#define PM2XXX_REG_INT4 0x43
-#define PM2XXX_MASK_REG_INT4 0x53
-#define PM2XXX_SRCE_REG_INT4 0x63
-#define PM2XXX_REG_INT5 0x44
-#define PM2XXX_MASK_REG_INT5 0x54
-#define PM2XXX_SRCE_REG_INT5 0x64
-#define PM2XXX_REG_INT6 0x45
-#define PM2XXX_MASK_REG_INT6 0x55
-#define PM2XXX_SRCE_REG_INT6 0x65
-
-#define VPWR_OVV 0x0
-#define VSYSTEM_OVV 0x1
-
-/* control Reg 1 */
-#define PM2XXX_CH_RESUME_EN 0x1
-#define PM2XXX_CH_RESUME_DIS 0x0
-
-/* control Reg 2 */
-#define PM2XXX_CH_AUTO_RESUME_EN 0X2
-#define PM2XXX_CH_AUTO_RESUME_DIS 0X0
-#define PM2XXX_CHARGER_ENA 0x4
-#define PM2XXX_CHARGER_DIS 0x0
-
-/* control Reg 3 */
-#define PM2XXX_CH_WD_CC_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_CC_PHASE_5MIN 0x1
-#define PM2XXX_CH_WD_CC_PHASE_10MIN 0x2
-#define PM2XXX_CH_WD_CC_PHASE_30MIN 0x3
-#define PM2XXX_CH_WD_CC_PHASE_60MIN 0x4
-#define PM2XXX_CH_WD_CC_PHASE_120MIN 0x5
-#define PM2XXX_CH_WD_CC_PHASE_240MIN 0x6
-#define PM2XXX_CH_WD_CC_PHASE_360MIN 0x7
-
-#define PM2XXX_CH_WD_CV_PHASE_OFF (0x0<<3)
-#define PM2XXX_CH_WD_CV_PHASE_5MIN (0x1<<3)
-#define PM2XXX_CH_WD_CV_PHASE_10MIN (0x2<<3)
-#define PM2XXX_CH_WD_CV_PHASE_30MIN (0x3<<3)
-#define PM2XXX_CH_WD_CV_PHASE_60MIN (0x4<<3)
-#define PM2XXX_CH_WD_CV_PHASE_120MIN (0x5<<3)
-#define PM2XXX_CH_WD_CV_PHASE_240MIN (0x6<<3)
-#define PM2XXX_CH_WD_CV_PHASE_360MIN (0x7<<3)
-
-/* control Reg 4 */
-#define PM2XXX_CH_WD_PRECH_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_PRECH_PHASE_1MIN 0x1
-#define PM2XXX_CH_WD_PRECH_PHASE_5MIN 0x2
-#define PM2XXX_CH_WD_PRECH_PHASE_10MIN 0x3
-#define PM2XXX_CH_WD_PRECH_PHASE_30MIN 0x4
-#define PM2XXX_CH_WD_PRECH_PHASE_60MIN 0x5
-#define PM2XXX_CH_WD_PRECH_PHASE_120MIN 0x6
-#define PM2XXX_CH_WD_PRECH_PHASE_240MIN 0x7
-
-/* control Reg 5 */
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_NONE 0x0
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN 0x1
-
-/* control Reg 6 */
-#define PM2XXX_DIR_CH_CC_CURRENT_MASK 0x0F
-#define PM2XXX_DIR_CH_CC_CURRENT_200MA 0x0
-#define PM2XXX_DIR_CH_CC_CURRENT_400MA 0x2
-#define PM2XXX_DIR_CH_CC_CURRENT_600MA 0x3
-#define PM2XXX_DIR_CH_CC_CURRENT_800MA 0x4
-#define PM2XXX_DIR_CH_CC_CURRENT_1000MA 0x5
-#define PM2XXX_DIR_CH_CC_CURRENT_1200MA 0x6
-#define PM2XXX_DIR_CH_CC_CURRENT_1400MA 0x7
-#define PM2XXX_DIR_CH_CC_CURRENT_1600MA 0x8
-#define PM2XXX_DIR_CH_CC_CURRENT_1800MA 0x9
-#define PM2XXX_DIR_CH_CC_CURRENT_2000MA 0xA
-#define PM2XXX_DIR_CH_CC_CURRENT_2200MA 0xB
-#define PM2XXX_DIR_CH_CC_CURRENT_2400MA 0xC
-#define PM2XXX_DIR_CH_CC_CURRENT_2600MA 0xD
-#define PM2XXX_DIR_CH_CC_CURRENT_2800MA 0xE
-#define PM2XXX_DIR_CH_CC_CURRENT_3000MA 0xF
-
-#define PM2XXX_CH_PRECH_CURRENT_MASK 0x30
-#define PM2XXX_CH_PRECH_CURRENT_25MA (0x0<<4)
-#define PM2XXX_CH_PRECH_CURRENT_50MA (0x1<<4)
-#define PM2XXX_CH_PRECH_CURRENT_75MA (0x2<<4)
-#define PM2XXX_CH_PRECH_CURRENT_100MA (0x3<<4)
-
-#define PM2XXX_CH_EOC_CURRENT_MASK 0xC0
-#define PM2XXX_CH_EOC_CURRENT_100MA (0x0<<6)
-#define PM2XXX_CH_EOC_CURRENT_150MA (0x1<<6)
-#define PM2XXX_CH_EOC_CURRENT_300MA (0x2<<6)
-#define PM2XXX_CH_EOC_CURRENT_400MA (0x3<<6)
-
-/* control Reg 7 */
-#define PM2XXX_CH_PRECH_VOL_2_5 0x0
-#define PM2XXX_CH_PRECH_VOL_2_7 0x1
-#define PM2XXX_CH_PRECH_VOL_2_9 0x2
-#define PM2XXX_CH_PRECH_VOL_3_1 0x3
-
-#define PM2XXX_CH_VRESUME_VOL_3_2 (0x0<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_4 (0x1<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_6 (0x2<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_8 (0x3<<2)
-
-/* control Reg 8 */
-#define PM2XXX_CH_VOLT_MASK 0x3F
-#define PM2XXX_CH_VOLT_3_5 0x0
-#define PM2XXX_CH_VOLT_3_5225 0x1
-#define PM2XXX_CH_VOLT_3_6 0x4
-#define PM2XXX_CH_VOLT_3_7 0x8
-#define PM2XXX_CH_VOLT_4_0 0x14
-#define PM2XXX_CH_VOLT_4_175 0x1B
-#define PM2XXX_CH_VOLT_4_2 0x1C
-#define PM2XXX_CH_VOLT_4_275 0x1F
-#define PM2XXX_CH_VOLT_4_3 0x20
-
-/*NTC control register 1*/
-#define PM2XXX_BTEMP_HIGH_TH_45 0x0
-#define PM2XXX_BTEMP_HIGH_TH_50 0x1
-#define PM2XXX_BTEMP_HIGH_TH_55 0x2
-#define PM2XXX_BTEMP_HIGH_TH_60 0x3
-#define PM2XXX_BTEMP_HIGH_TH_65 0x4
-
-#define PM2XXX_BTEMP_LOW_TH_N5 (0x0<<3)
-#define PM2XXX_BTEMP_LOW_TH_0 (0x1<<3)
-#define PM2XXX_BTEMP_LOW_TH_5 (0x2<<3)
-#define PM2XXX_BTEMP_LOW_TH_10 (0x3<<3)
-
-/*NTC control register 2*/
-#define PM2XXX_NTC_BETA_COEFF_3477 0x0
-#define PM2XXX_NTC_BETA_COEFF_3964 0x1
-
-#define PM2XXX_NTC_RES_10K (0x0<<2)
-#define PM2XXX_NTC_RES_47K (0x1<<2)
-#define PM2XXX_NTC_RES_100K (0x2<<2)
-#define PM2XXX_NTC_RES_NO_NTC (0x3<<2)
-
-/* control Reg 9 */
-#define PM2XXX_CH_CC_MODEDROP_EN 1
-#define PM2XXX_CH_CC_MODEDROP_DIS 0
-
-#define PM2XXX_CH_CC_REDUCED_CURRENT_100MA (0x0<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_200MA (0x1<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_400MA (0x2<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_IDENT (0x3<<1)
-
-#define PM2XXX_CHARCHING_INFO_DIS (0<<3)
-#define PM2XXX_CHARCHING_INFO_EN (1<<3)
-
-#define PM2XXX_CH_150MV_DROP_300MV (0<<4)
-#define PM2XXX_CH_150MV_DROP_150MV (1<<4)
-
-
-/* charger status register */
-#define PM2XXX_CHG_STATUS_OFF 0x0
-#define PM2XXX_CHG_STATUS_ON 0x1
-#define PM2XXX_CHG_STATUS_FULL 0x2
-#define PM2XXX_CHG_STATUS_ERR 0x3
-#define PM2XXX_CHG_STATUS_WAIT 0x4
-#define PM2XXX_CHG_STATUS_NOBAT 0x5
-
-/* Input charger voltage VPWR2 */
-#define PM2XXX_VPWR2_OVV_6_0 0x0
-#define PM2XXX_VPWR2_OVV_6_3 0x1
-#define PM2XXX_VPWR2_OVV_10 0x2
-#define PM2XXX_VPWR2_OVV_NONE 0x3
-
-/* Input charger drop VPWR2 */
-#define PM2XXX_VPWR2_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR2_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR2_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR2_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR2_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR2_DROP_DIS (0x0<<2)
-
-/* Input charger voltage VPWR1 */
-#define PM2XXX_VPWR1_OVV_6_0 0x0
-#define PM2XXX_VPWR1_OVV_6_3 0x1
-#define PM2XXX_VPWR1_OVV_10 0x2
-#define PM2XXX_VPWR1_OVV_NONE 0x3
-
-/* Input charger drop VPWR1 */
-#define PM2XXX_VPWR1_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR1_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR1_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR1_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR1_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR1_DROP_DIS (0x0<<2)
-
-/* Battery low level comparator control register */
-#define PM2XXX_VBAT_LOW_MONITORING_DIS 0x0
-#define PM2XXX_VBAT_LOW_MONITORING_ENA 0x1
-
-/* Battery low level value control register */
-#define PM2XXX_VBAT_LOW_LEVEL_2_3 0x0
-#define PM2XXX_VBAT_LOW_LEVEL_2_4 0x1
-#define PM2XXX_VBAT_LOW_LEVEL_2_5 0x2
-#define PM2XXX_VBAT_LOW_LEVEL_2_6 0x3
-#define PM2XXX_VBAT_LOW_LEVEL_2_7 0x4
-#define PM2XXX_VBAT_LOW_LEVEL_2_8 0x5
-#define PM2XXX_VBAT_LOW_LEVEL_2_9 0x6
-#define PM2XXX_VBAT_LOW_LEVEL_3_0 0x7
-#define PM2XXX_VBAT_LOW_LEVEL_3_1 0x8
-#define PM2XXX_VBAT_LOW_LEVEL_3_2 0x9
-#define PM2XXX_VBAT_LOW_LEVEL_3_3 0xA
-#define PM2XXX_VBAT_LOW_LEVEL_3_4 0xB
-#define PM2XXX_VBAT_LOW_LEVEL_3_5 0xC
-#define PM2XXX_VBAT_LOW_LEVEL_3_6 0xD
-#define PM2XXX_VBAT_LOW_LEVEL_3_7 0xE
-#define PM2XXX_VBAT_LOW_LEVEL_3_8 0xF
-#define PM2XXX_VBAT_LOW_LEVEL_3_9 0x10
-#define PM2XXX_VBAT_LOW_LEVEL_4_0 0x11
-#define PM2XXX_VBAT_LOW_LEVEL_4_1 0x12
-#define PM2XXX_VBAT_LOW_LEVEL_4_2 0x13
-
-/* SW CTRL */
-#define PM2XXX_SWCTRL_HW 0x0
-#define PM2XXX_SWCTRL_SW 0x1
-
-
-/* LED Driver Control */
-#define PM2XXX_LED_CURRENT_MASK 0x0C
-#define PM2XXX_LED_CURRENT_2_5MA (0X0<<2)
-#define PM2XXX_LED_CURRENT_1MA (0X1<<2)
-#define PM2XXX_LED_CURRENT_5MA (0X2<<2)
-#define PM2XXX_LED_CURRENT_10MA (0X3<<2)
-
-#define PM2XXX_LED_SELECT_MASK 0x02
-#define PM2XXX_LED_SELECT_EN (0X0<<1)
-#define PM2XXX_LED_SELECT_DIS (0X1<<1)
-
-#define PM2XXX_ANTI_OVERSHOOT_MASK 0x01
-#define PM2XXX_ANTI_OVERSHOOT_DIS 0X0
-#define PM2XXX_ANTI_OVERSHOOT_EN 0X1
-
-enum pm2xxx_reg_int1 {
- PM2XXX_INT1_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_ITVBATLOWR = 0x04,
- PM2XXX_INT1_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_mask_reg_int1 {
- PM2XXX_INT1_M_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_M_ITVBATLOWR = 0x04,
- PM2XXX_INT1_M_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_source_reg_int1 {
- PM2XXX_INT1_S_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_S_ITVBATLOWR = 0x04,
- PM2XXX_INT1_S_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_reg_int2 {
- PM2XXX_INT2_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_mask_reg_int2 {
- PM2XXX_INT2_M_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_M_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_M_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_M_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_source_reg_int2 {
- PM2XXX_INT2_S_ITVPWR2PLUG = 0x03,
- PM2XXX_INT2_S_ITVPWR1PLUG = 0x0c,
-};
-
-enum pm2xxx_reg_int3 {
- PM2XXX_INT3_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_ITCHCCWD = 0x02,
- PM2XXX_INT3_ITCHCVWD = 0x04,
- PM2XXX_INT3_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_mask_reg_int3 {
- PM2XXX_INT3_M_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_M_ITCHCCWD = 0x02,
- PM2XXX_INT3_M_ITCHCVWD = 0x04,
- PM2XXX_INT3_M_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_source_reg_int3 {
- PM2XXX_INT3_S_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_S_ITCHCCWD = 0x02,
- PM2XXX_INT3_S_ITCHCVWD = 0x04,
- PM2XXX_INT3_S_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_reg_int4 {
- PM2XXX_INT4_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_ITCHARGINGON = 0x10,
- PM2XXX_INT4_ITVRESUME = 0x20,
- PM2XXX_INT4_ITBATTFULL = 0x40,
- PM2XXX_INT4_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_mask_reg_int4 {
- PM2XXX_INT4_M_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_M_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_M_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_M_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_M_ITCHARGINGON = 0x10,
- PM2XXX_INT4_M_ITVRESUME = 0x20,
- PM2XXX_INT4_M_ITBATTFULL = 0x40,
- PM2XXX_INT4_M_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_source_reg_int4 {
- PM2XXX_INT4_S_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_S_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_S_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_S_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_S_ITCHARGINGON = 0x10,
- PM2XXX_INT4_S_ITVRESUME = 0x20,
- PM2XXX_INT4_S_ITBATTFULL = 0x40,
- PM2XXX_INT4_S_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_reg_int5 {
- PM2XXX_INT5_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_mask_reg_int5 {
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_M_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_M_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_M_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_source_reg_int5 {
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_S_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_S_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_S_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_reg_int6 {
- PM2XXX_INT6_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_mask_reg_int6 {
- PM2XXX_INT6_M_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_M_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_M_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_M_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_M_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_M_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_source_reg_int6 {
- PM2XXX_INT6_S_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_S_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_S_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_S_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_S_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_S_ITVPWR1VALIDFALL = 0x20,
-};
-
-struct pm2xxx_charger_info {
- int charger_connected;
- int charger_online;
- int cv_active;
- bool wd_expired;
-};
-
-struct pm2xxx_charger_event_flags {
- bool mainextchnotok;
- bool main_thermal_prot;
- bool ovv;
- bool chgwdexp;
-};
-
-struct pm2xxx_interrupts {
- u8 reg[PM2XXX_NUM_INT_REG];
- int (*handler[PM2XXX_NUM_INT_REG])(void *, int);
-};
-
-struct pm2xxx_config {
- struct i2c_client *pm2xxx_i2c;
- struct i2c_device_id *pm2xxx_id;
-};
-
-struct pm2xxx_irq {
- char *name;
- irqreturn_t (*isr)(int irq, void *data);
-};
-
-struct pm2xxx_charger {
- struct device *dev;
- u8 chip_id;
- bool vddadc_en_ac;
- struct pm2xxx_config config;
- bool ac_conn;
- unsigned int gpio_irq;
- int vbat;
- int old_vbat;
- int failure_case;
- int failure_input_ovv;
- unsigned int lpn_pin;
- struct pm2xxx_interrupts *pm2_int;
- struct regulator *regu;
- struct pm2xxx_bm_data *bat;
- struct mutex lock;
- struct ab8500 *parent;
- struct pm2xxx_charger_info ac;
- struct pm2xxx_charger_platform_data *pdata;
- struct workqueue_struct *charger_wq;
- struct delayed_work check_vbat_work;
- struct work_struct ac_work;
- struct work_struct check_main_thermal_prot_work;
- struct delayed_work check_hw_failure_work;
- struct ux500_charger ac_chg;
- struct power_supply_desc ac_chg_desc;
- struct pm2xxx_charger_event_flags flags;
-};
-
-#endif /* PM2301_CHARGER_H */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 470253c337c7..4b5fb172fa99 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -263,13 +263,13 @@ static int power_supply_check_supplies(struct power_supply *psy)
return 0;
/* All supplies found, allocate char ** array for filling */
- psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(psy->supplied_from),
+ psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(*psy->supplied_from),
GFP_KERNEL);
if (!psy->supplied_from)
return -ENOMEM;
*psy->supplied_from = devm_kcalloc(&psy->dev,
- cnt - 1, sizeof(char *),
+ cnt - 1, sizeof(**psy->supplied_from),
GFP_KERNEL);
if (!*psy->supplied_from)
return -ENOMEM;
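
The supplied_from fix above is the classic sizeof-the-pointer-field slip: sizeof(psy->supplied_from) measures the char ** member itself and only happened to equal the intended element size. A tiny demonstration of why the two expressions differ in meaning even when they agree numerically:

#include <stdio.h>

struct psy_like {
	char **supplied_from;
};

int main(void)
{
	struct psy_like psy = { 0 };

	/* On LP64 both print 8, which is why the old code worked, but only
	 * the dereferenced form names one element of what is allocated. */
	printf("sizeof(field):   %zu\n", sizeof(psy.supplied_from));
	printf("sizeof(element): %zu\n", sizeof(*psy.supplied_from));
	return 0;
}
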
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
index 5ec2e6bb2465..540707882bb0 100644
--- a/drivers/power/supply/surface_battery.c
+++ b/drivers/power/supply/surface_battery.c
@@ -802,7 +802,7 @@ static int spwr_battery_register(struct spwr_battery_device *bat)
if (IS_ERR(bat->psy))
return PTR_ERR(bat->psy);
- return ssam_notifier_register(bat->sdev->ctrl, &bat->notif);
+ return ssam_device_notifier_register(bat->sdev, &bat->notif);
}
@@ -837,7 +837,7 @@ static void surface_battery_remove(struct ssam_device *sdev)
{
struct spwr_battery_device *bat = ssam_device_get_drvdata(sdev);
- ssam_notifier_unregister(sdev->ctrl, &bat->notif);
+ ssam_device_notifier_unregister(sdev, &bat->notif);
cancel_delayed_work_sync(&bat->update_work);
}
diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c
index a060c36c7766..59182d55742d 100644
--- a/drivers/power/supply/surface_charger.c
+++ b/drivers/power/supply/surface_charger.c
@@ -216,7 +216,7 @@ static int spwr_ac_register(struct spwr_ac_device *ac)
if (IS_ERR(ac->psy))
return PTR_ERR(ac->psy);
- return ssam_notifier_register(ac->sdev->ctrl, &ac->notif);
+ return ssam_device_notifier_register(ac->sdev, &ac->notif);
}
@@ -251,7 +251,7 @@ static void surface_ac_remove(struct ssam_device *sdev)
{
struct spwr_ac_device *ac = ssam_device_get_drvdata(sdev);
- ssam_notifier_unregister(sdev->ctrl, &ac->notif);
+ ssam_device_notifier_unregister(sdev, &ac->notif);
}
static const struct spwr_psy_properties spwr_psy_props_adp1 = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7150b1d0159e..d8373cb04f90 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4784,10 +4784,10 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- consumers[i].consumer = NULL;
ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
"Failed to get supply '%s'",
consumers[i].supply);
+ consumers[i].consumer = NULL;
goto err;
}
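
The reorder in regulator_bulk_get() above fixes the log, not the control flow: clearing consumers[i].consumer before calling PTR_ERR() on it meant dev_err_probe() always saw 0 instead of the real error code. A minimal sketch with stand-in ERR_PTR/PTR_ERR macros (simplified from the kernel's err.h):

#include <stdio.h>

#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))

int main(void)
{
	void *consumer = ERR_PTR(-19);	/* pretend the get failed: -ENODEV */
	long err;

	consumer = NULL;		/* old order: clear first... */
	printf("old order logs %ld\n", PTR_ERR(consumer));	/* 0 */

	consumer = ERR_PTR(-19);
	err = PTR_ERR(consumer);	/* new order: capture, then clear */
	consumer = NULL;
	printf("new order logs %ld\n", err);			/* -19 */
	return 0;
}
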
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index c4754f3cf233..1591636f86c3 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -22,36 +22,6 @@ struct cros_ec_regulator_data {
u16 num_voltages;
};
-static int cros_ec_cmd(struct cros_ec_device *ec, u32 version, u32 command,
- void *outdata, u32 outsize, void *indata, u32 insize)
-{
- struct cros_ec_command *msg;
- int ret;
-
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->version = version;
- msg->command = command;
- msg->outsize = outsize;
- msg->insize = insize;
-
- if (outdata && outsize > 0)
- memcpy(msg->data, outdata, outsize);
-
- ret = cros_ec_cmd_xfer_status(ec, msg);
- if (ret < 0)
- goto cleanup;
-
- if (insize)
- memcpy(indata, msg->data, insize);
-
-cleanup:
- kfree(msg);
- return ret;
-}
-
static int cros_ec_regulator_enable(struct regulator_dev *dev)
{
struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
@@ -61,7 +31,7 @@ static int cros_ec_regulator_enable(struct regulator_dev *dev)
};
return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
- sizeof(cmd), NULL, 0);
+ sizeof(cmd), NULL, 0);
}
static int cros_ec_regulator_disable(struct regulator_dev *dev)
@@ -73,7 +43,7 @@ static int cros_ec_regulator_disable(struct regulator_dev *dev)
};
return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
- sizeof(cmd), NULL, 0);
+ sizeof(cmd), NULL, 0);
}
static int cros_ec_regulator_is_enabled(struct regulator_dev *dev)
@@ -161,7 +131,7 @@ static int cros_ec_regulator_init_info(struct device *dev,
int ret;
ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_GET_INFO, &cmd,
- sizeof(cmd), &resp, sizeof(resp));
+ sizeof(cmd), &resp, sizeof(resp));
if (ret < 0)
return ret;
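
With the private helper deleted, the enable/disable/get-info paths call the cros_ec core's shared cros_ec_cmd(). A hedged sketch of one call, with the signature inferred from the call sites in this hunk (ec, version, command, outdata, outsize, indata, insize):

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

static int example_regulator_enable(struct cros_ec_device *ec, u32 index)
{
	struct ec_params_regulator_enable cmd = {
		.index = index,
		.enable = 1,
	};

	/* No reply payload expected, hence NULL/0 for the in-direction */
	return cros_ec_cmd(ec, 0, EC_CMD_REGULATOR_ENABLE,
			   &cmd, sizeof(cmd), NULL, 0);
}
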
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 4a3352821b1d..38383e7de3c1 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -594,16 +594,17 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
node = of_parse_phandle(np, "memory-region", a);
/* Not map vdevbuffer, vdevring region */
- if (!strncmp(node->name, "vdev", strlen("vdev")))
+ if (!strncmp(node->name, "vdev", strlen("vdev"))) {
+ of_node_put(node);
continue;
+ }
err = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
- of_node_put(node);
-
if (b >= IMX_RPROC_MEM_MAX)
break;
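
The imx_rproc change above is a refcount fix: of_parse_phandle() returns a node with an elevated refcount, and the old code leaked it on the early continue for vdev regions while dropping it too late on the of_address_to_resource() error path. The general shape, as a sketch:

#include <linux/of.h>
#include <linux/string.h>

static void walk_memory_regions(struct device_node *np)
{
	struct device_node *node;
	int i = 0;

	while ((node = of_parse_phandle(np, "memory-region", i++))) {
		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
			of_node_put(node);	/* early path drops ref too */
			continue;
		}

		/* ... map the region ... */

		of_node_put(node);		/* balanced on every path */
	}
}
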
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 54781f553f4e..594a9b43b7ae 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -410,10 +410,9 @@ static int keystone_rproc_probe(struct platform_device *pdev)
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to enable clock, status = %d\n", ret);
- pm_runtime_put_noidle(dev);
goto disable_rpm;
}
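
The keystone change above moves to pm_runtime_resume_and_get(), which drops the usage count itself when resume fails, so the error path no longer needs the easy-to-forget pm_runtime_put_noidle(). Sketch of the resulting idiom:

#include <linux/pm_runtime.h>

static int touch_hardware(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already balanced on failure */

	/* ... access clocked registers ... */

	pm_runtime_put(dev);
	return 0;
}
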
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 47b2a40e1b4a..d421a2ccaa1e 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -401,6 +401,14 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
return 0;
}
@@ -943,7 +951,19 @@ static const struct mtk_scp_of_data mt8186_of_data = {
.scp_da_to_va = mt8183_scp_da_to_va,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
- .ipi_buf_offset = 0x7bdb0,
+ .ipi_buf_offset = 0x3bdb0,
+};
+
+static const struct mtk_scp_of_data mt8188_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
static const struct mtk_scp_of_data mt8192_of_data = {
@@ -973,6 +993,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
+ { .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
{},
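The new mt8188 entry shows how per-SoC behaviour stays declarative in this driver: each compatible string points at a const ops table and probe() fetches it rather than branching on strings. A generic sketch of the match-data lookup (struct and field names here are illustrative, not from this driver):

#include <linux/platform_device.h>
#include <linux/property.h>

struct soc_data {
	int (*before_load)(void *priv);	/* per-SoC hook */
	u32 host_to_dev_reg;
};

static int example_probe(struct platform_device *pdev)
{
	const struct soc_data *data;

	/* resolves the .data pointer of the matched of_device_id entry */
	data = device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	/* ... dispatch through data->before_load, etc. ... */
	return 0;
}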
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 32a588fefbdc..430fab0266ed 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -243,7 +243,7 @@ static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
* omap_rproc_ack_timer_irq() - acknowledge a timer irq
* @timer: handle to a OMAP rproc timer
*
- * This function is used to clear the irq associated with a watchdog timer. The
+ * This function is used to clear the irq associated with a watchdog timer.
* The function is called by the OMAP remoteproc upon a watchdog event on the
* remote processor to clear the interrupt status of the watchdog timer.
*/
@@ -303,7 +303,7 @@ static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
* @configure: boolean flag used to acquire and configure the timer handle
*
* This function is used primarily to enable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either acquire and start a timer (during device initialization) or
* to just start a timer (during a resume operation).
*
@@ -443,7 +443,7 @@ free_timers:
* @configure: boolean flag used to release the timer handle
*
* This function is used primarily to disable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either stop and release a timer (during device shutdown) or to just
* stop a timer (during a suspend operation).
*
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 1777a01fa84e..128bf9912f2c 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -897,6 +897,7 @@ static const struct of_device_id pru_rproc_match[] = {
{ .compatible = "ti,j721e-pru", .data = &k3_pru_data },
{ .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
{ .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,am625-pru", .data = &k3_pru_data },
{},
};
MODULE_DEVICE_TABLE(of, pru_rproc_match);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 4b91e3c9eafa..020349f8979d 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -50,7 +50,7 @@ struct minidump_region {
};
/**
- * struct minidump_subsystem_toc: Subsystem's SMEM Table of content
+ * struct minidump_subsystem - Subsystem's SMEM Table of content
* @status : Subsystem toc init status
* @enabled : if set to 1, this region would be copied during coredump
* @encryption_status: Encryption status for this subsystem
@@ -68,7 +68,7 @@ struct minidump_subsystem {
};
/**
- * struct minidump_global_toc: Global Table of Content
+ * struct minidump_global_toc - Global Table of Content
* @status : Global Minidump init status
* @md_revision : Minidump revision
* @enabled : Minidump enable status
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 5280ec9b5449..497acfb33f8f 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -112,6 +112,7 @@ static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
else
dev_err(q6v5->dev, "watchdog without message\n");
+ q6v5->running = false;
rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
@@ -123,6 +124,9 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
size_t len;
char *msg;
+ if (!q6v5->running)
+ return IRQ_HANDLED;
+
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
if (!IS_ERR(msg) && len > 0 && msg[0])
dev_err(q6v5->dev, "fatal error received: %s\n", msg);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 2f3b9f54251e..4c9a1b99cd51 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -175,9 +175,8 @@ static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
- ret = pm_runtime_get_sync(pds[i]);
+ ret = pm_runtime_resume_and_get(pds[i]);
if (ret < 0) {
- pm_runtime_put_noidle(pds[i]);
dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
}
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index af217de75e4d..fddb63cffee0 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -932,27 +933,52 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
const char *fw_name)
{
- unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_KERNEL_MAPPING;
+ unsigned long flags = VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS;
+ struct page **pages;
+ struct page *page;
dma_addr_t phys;
void *metadata;
int mdata_perm;
int xferop_ret;
size_t size;
- void *ptr;
+ void *vaddr;
+ int count;
int ret;
+ int i;
metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
if (IS_ERR(metadata))
return PTR_ERR(metadata);
- ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
- if (!ptr) {
+ page = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+ if (!page) {
kfree(metadata);
dev_err(qproc->dev, "failed to allocate mdt buffer\n");
return -ENOMEM;
}
- memcpy(ptr, metadata, size);
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto free_dma_attrs;
+ }
+
+ for (i = 0; i < count; i++)
+ pages[i] = nth_page(page, i);
+
+ vaddr = vmap(pages, count, flags, pgprot_dmacoherent(PAGE_KERNEL));
+ kfree(pages);
+ if (!vaddr) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", &phys, size);
+ ret = -EBUSY;
+ goto free_dma_attrs;
+ }
+
+ memcpy(vaddr, metadata, size);
+
+ vunmap(vaddr);
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
@@ -982,7 +1008,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
"mdt buffer not reclaimed system may become unstable\n");
free_dma_attrs:
- dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
+ dma_free_attrs(qproc->dev, size, page, phys, dma_attrs);
kfree(metadata);
return ret < 0 ? ret : 0;
@@ -1102,6 +1128,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
if (ret)
goto reclaim_mba;
+ if (qproc->has_mba_logs)
+ qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
+
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "MBA boot timed out\n");
@@ -1594,11 +1623,19 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
return ret;
}
+static unsigned long q6v5_panic(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+
+ return qcom_q6v5_panic(&qproc->q6v5);
+}
+
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
.parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
+ .panic = q6v5_panic,
};
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
@@ -2188,6 +2225,11 @@ static const struct rproc_hexagon_res msm8996_mss = {
"mnoc_axi",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = false,
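The metadata rework above hinges on DMA_ATTR_NO_KERNEL_MAPPING: dma_alloc_attrs() then returns a struct page pointer instead of a kernel virtual address, and the driver vmap()s the pages only for the duration of the memcpy() so no stale kernel alias survives once the buffer is handed to the modem. A simplified sketch of that transient-mapping idiom (generic VM_MAP/PAGE_KERNEL here; the driver itself uses VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS and a DMA-coherent pgprot):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static int copy_into_unmapped_buffer(struct page *page, size_t size,
				     const void *src)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	unsigned int i;

	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);

	vaddr = vmap(pages, count, VM_MAP, PAGE_KERNEL);
	kfree(pages);			/* the mapping no longer needs the array */
	if (!vaddr)
		return -EBUSY;

	memcpy(vaddr, src, size);
	vunmap(vaddr);			/* tear the alias down immediately */

	return 0;
}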
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 6ae39c5653b1..6afd0941e552 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -29,6 +30,8 @@
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
+#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+
struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
@@ -36,6 +39,7 @@ struct adsp_data {
unsigned int minidump_id;
bool has_aggre2_clk;
bool auto_boot;
+ bool decrypt_shutdown;
char **proxy_pd_names;
@@ -65,6 +69,7 @@ struct qcom_adsp {
unsigned int minidump_id;
int crash_reason_smem;
bool has_aggre2_clk;
+ bool decrypt_shutdown;
const char *info_name;
struct completion start_done;
@@ -87,6 +92,9 @@ static void adsp_minidump(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
+ if (rproc->dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
qcom_minidump(rproc, adsp->minidump_id);
}
@@ -128,6 +136,19 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
}
}
+static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp)
+{
+ unsigned int retry_num = 50;
+ int ret;
+
+ do {
+ msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS);
+ ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ } while (ret == -EINVAL && --retry_num);
+
+ return ret;
+}
+
static int adsp_unprepare(struct rproc *rproc)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -185,13 +206,17 @@ static int adsp_start(struct rproc *rproc)
if (ret)
goto disable_xo_clk;
- ret = regulator_enable(adsp->cx_supply);
- if (ret)
- goto disable_aggre2_clk;
+ if (adsp->cx_supply) {
+ ret = regulator_enable(adsp->cx_supply);
+ if (ret)
+ goto disable_aggre2_clk;
+ }
- ret = regulator_enable(adsp->px_supply);
- if (ret)
- goto disable_cx_supply;
+ if (adsp->px_supply) {
+ ret = regulator_enable(adsp->px_supply);
+ if (ret)
+ goto disable_cx_supply;
+ }
ret = qcom_scm_pas_auth_and_reset(adsp->pas_id);
if (ret) {
@@ -212,9 +237,11 @@ static int adsp_start(struct rproc *rproc)
return 0;
disable_px_supply:
- regulator_disable(adsp->px_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
disable_cx_supply:
- regulator_disable(adsp->cx_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
disable_aggre2_clk:
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
@@ -231,8 +258,10 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
{
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
- regulator_disable(adsp->px_supply);
- regulator_disable(adsp->cx_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
@@ -249,6 +278,9 @@ static int adsp_stop(struct rproc *rproc)
dev_err(adsp->dev, "timed out on wait\n");
ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ if (ret && adsp->decrypt_shutdown)
+ ret = adsp_shutdown_poll_decrypt(adsp);
+
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
@@ -268,6 +300,9 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iom
if (offset < 0 || offset + len > adsp->mem_size)
return NULL;
+ if (is_iomem)
+ *is_iomem = true;
+
return adsp->mem_region + offset;
}
@@ -326,14 +361,26 @@ static int adsp_init_clock(struct qcom_adsp *adsp)
static int adsp_init_regulator(struct qcom_adsp *adsp)
{
- adsp->cx_supply = devm_regulator_get(adsp->dev, "cx");
- if (IS_ERR(adsp->cx_supply))
- return PTR_ERR(adsp->cx_supply);
+ adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx");
+ if (IS_ERR(adsp->cx_supply)) {
+ if (PTR_ERR(adsp->cx_supply) == -ENODEV)
+ adsp->cx_supply = NULL;
+ else
+ return PTR_ERR(adsp->cx_supply);
+ }
- regulator_set_load(adsp->cx_supply, 100000);
+ if (adsp->cx_supply)
+ regulator_set_load(adsp->cx_supply, 100000);
- adsp->px_supply = devm_regulator_get(adsp->dev, "px");
- return PTR_ERR_OR_ZERO(adsp->px_supply);
+ adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px");
+ if (IS_ERR(adsp->px_supply)) {
+ if (PTR_ERR(adsp->px_supply) == -ENODEV)
+ adsp->px_supply = NULL;
+ else
+ return PTR_ERR(adsp->px_supply);
+ }
+
+ return 0;
}
static int adsp_pds_attach(struct device *dev, struct device **devs,
@@ -459,9 +506,12 @@ static int adsp_probe(struct platform_device *pdev)
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
adsp->info_name = desc->sysmon_name;
+ adsp->decrypt_shutdown = desc->decrypt_shutdown;
platform_set_drvdata(pdev, adsp);
- device_wakeup_enable(adsp->dev);
+ ret = device_init_wakeup(adsp->dev, true);
+ if (ret)
+ goto free_rproc;
ret = adsp_alloc_memory_region(adsp);
if (ret)
@@ -877,6 +927,25 @@ static const struct adsp_data sdx55_mpss_resource = {
.ssctl_id = 0x22,
};
+static const struct adsp_data sm8450_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
@@ -916,7 +985,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
{ .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
- { .compatible = "qcom,sm8450-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
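Two recurring idioms in the qcom_q6v5_pas.c hunks deserve a note. devm_regulator_get_optional() returns -ENODEV when the supply simply is not described, which the driver converts to a NULL pointer and then guards at every use; separately, the decrypt-shutdown path retries qcom_scm_pas_shutdown() at a fixed interval while the firmware keeps reporting -EINVAL. A condensed sketch of the optional-supply half (hypothetical helper name):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_optional_supply(struct device *dev, const char *id,
			       struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, id);

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) != -ENODEV)
			return PTR_ERR(reg);	/* a real error, propagate */
		reg = NULL;			/* supply not wired up: fine */
	}

	*out = reg;
	return 0;
}

Callers then guard every use, e.g. "if (reg) regulator_enable(reg);", exactly as adsp_start() now does.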
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 9fca81492863..57dde2a69b9d 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -41,6 +41,7 @@ struct qcom_sysmon {
struct completion comp;
struct completion ind_comp;
struct completion shutdown_comp;
+ struct completion ssctl_comp;
struct mutex lock;
bool ssr_ack;
@@ -445,6 +446,8 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
svc->priv = sysmon;
+ complete(&sysmon->ssctl_comp);
+
return 0;
}
@@ -501,6 +504,7 @@ static int sysmon_start(struct rproc_subdev *subdev)
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
};
+ reinit_completion(&sysmon->ssctl_comp);
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
@@ -508,10 +512,12 @@ static int sysmon_start(struct rproc_subdev *subdev)
mutex_lock(&sysmon_lock);
list_for_each_entry(target, &sysmon_list, node) {
- if (target == sysmon)
+ mutex_lock(&target->state_lock);
+ if (target == sysmon || target->state != SSCTL_SSR_EVENT_AFTER_POWERUP) {
+ mutex_unlock(&target->state_lock);
continue;
+ }
- mutex_lock(&target->state_lock);
event.subsys_name = target->name;
event.ssr_event = target->state;
@@ -545,6 +551,11 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
if (crashed)
return;
+ if (sysmon->ssctl_instance) {
+ if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
+ dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
+ }
+
if (sysmon->ssctl_version)
sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
@@ -631,6 +642,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
init_completion(&sysmon->comp);
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
+ init_completion(&sysmon->ssctl_comp);
mutex_init(&sysmon->lock);
mutex_init(&sysmon->state_lock);
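The new ssctl_comp completion above is a start/stop handshake: sysmon_start() re-arms it with reinit_completion(), the QMI new-server callback completes it, and sysmon_stop() waits on it with a bounded timeout before sending the shutdown request. The bare shape of that pattern, as a sketch:

#include <linux/completion.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(svc_ready);

static void on_service_arrived(void)
{
	complete(&svc_ready);		/* producer: service is now usable */
}

static void before_next_cycle(void)
{
	reinit_completion(&svc_ready);	/* consumer re-arms for the next boot */
}

static int wait_for_service(void)
{
	/* bounded wait; returns remaining jiffies, 0 on timeout */
	if (!wait_for_completion_timeout(&svc_ready, HZ / 2))
		return -ETIMEDOUT;
	return 0;
}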
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 9a223d394087..68f37296b151 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -467,6 +467,7 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
irq_handler_t thread_fn)
{
int ret;
+ int irq_number;
ret = platform_get_irq_byname(pdev, name);
if (ret < 0 && optional) {
@@ -477,14 +478,19 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
return ret;
}
+ irq_number = ret;
+
ret = devm_request_threaded_irq(&pdev->dev, ret,
NULL, thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"wcnss", wcnss);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
- return ret;
+ /* Return the IRQ number if the IRQ was successfully acquired */
+ return irq_number;
}
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
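The subtlety fixed in wcnss_request_irq(): platform_get_irq_byname() returns a Linux IRQ number, while devm_request_threaded_irq() returns 0 on success, so reusing one variable for both silently turned a valid IRQ number into 0 for the caller. Stashing the number first restores the intended contract, roughly:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int request_named_irq(struct platform_device *pdev, const char *name,
			     irq_handler_t thread_fn, void *data)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					name, data);
	if (ret)
		return ret;

	return irq;	/* success: hand back the IRQ number, not 0 */
}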
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 02a04ab34a23..e5279ed9a8d7 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -59,6 +59,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
+static struct workqueue_struct *rproc_recovery_wq;
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
@@ -334,7 +335,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
size_t size;
/* actual size of vring (in bytes) */
- size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+ size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
@@ -401,7 +402,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
return -EINVAL;
}
- rvring->len = vring->num;
+ rvring->num = vring->num;
rvring->align = vring->align;
rvring->rvdev = rvdev;
@@ -461,6 +462,7 @@ static void rproc_rvdev_release(struct device *dev)
struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
of_reserved_mem_device_release(dev);
+ dma_release_coherent_memory(dev);
kfree(rvdev);
}
@@ -970,7 +972,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
return 0;
}
- /* Register carveout in in list */
+ /* Register carveout in list */
carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
rproc_alloc_carveout,
rproc_release_carveout, rsc->name);
@@ -2434,7 +2436,7 @@ static void rproc_type_release(struct device *dev)
idr_destroy(&rproc->notifyids);
if (rproc->index >= 0)
- ida_simple_remove(&rproc_dev_index, rproc->index);
+ ida_free(&rproc_dev_index, rproc->index);
kfree_const(rproc->firmware);
kfree_const(rproc->name);
@@ -2551,9 +2553,9 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
goto put_device;
/* Assign a unique device index and name */
- rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
if (rproc->index < 0) {
- dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
goto put_device;
}
@@ -2762,8 +2764,7 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
- /* Have a worker handle the error; ensure system is not suspended */
- queue_work(system_freezable_wq, &rproc->crash_handler);
+ queue_work(rproc_recovery_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
@@ -2812,6 +2813,13 @@ static void __exit rproc_exit_panic(void)
static int __init remoteproc_init(void)
{
+ rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq",
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!rproc_recovery_wq) {
+ pr_err("remoteproc: creation of rproc_recovery_wq failed\n");
+ return -ENOMEM;
+ }
+
rproc_init_sysfs();
rproc_init_debugfs();
rproc_init_cdev();
@@ -2825,9 +2833,13 @@ static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
+ if (!rproc_recovery_wq)
+ return;
+
rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
+ destroy_workqueue(rproc_recovery_wq);
}
module_exit(remoteproc_exit);
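Besides moving crash handling onto a dedicated WQ_UNBOUND | WQ_FREEZABLE workqueue, this file performs the by-now routine ida_simple_*() to ida_alloc()/ida_free() conversion; the _simple wrappers are deprecated, and ida_alloc() covers the common whole-range case directly. The mechanical mapping:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_index(void)
{
	/* was: ida_simple_get(&example_ida, 0, 0, GFP_KERNEL) */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_index(int id)
{
	/* was: ida_simple_remove(&example_ida, id) */
	ida_free(&example_ida, id);
}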
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 70ab496d0431..0f7706e23eb9 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -87,7 +87,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct fw_rsc_vdev *rsc;
struct virtqueue *vq;
void *addr;
- int len, size;
+ int num, size;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
@@ -104,20 +104,20 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
rvring = &rvdev->vring[id];
addr = mem->va;
- len = rvring->len;
+ num = rvring->num;
/* zero vring */
- size = vring_size(len, rvring->align);
+ size = vring_size(num, rvring->align);
memset(addr, 0, size);
dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
- id, addr, len, rvring->notifyid);
+ id, addr, num, rvring->notifyid);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
- vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
+ vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
addr, rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
@@ -125,6 +125,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
return ERR_PTR(-ENOMEM);
}
+ vq->num_max = num;
+
rvring->vq = vq;
vq->priv = rvring;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 4840ad906018..0481926c6975 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1655,6 +1655,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
if (!cpdev) {
ret = -ENODEV;
dev_err(dev, "could not get R5 core platform device\n");
+ of_node_put(child);
goto fail;
}
@@ -1663,6 +1664,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
ret);
put_device(&cpdev->dev);
+ of_node_put(child);
goto fail;
}
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 48d94649ea82..806773e88832 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -17,7 +17,7 @@ if RESET_CONTROLLER
config RESET_A10SR
tristate "Altera Arria10 System Resource Reset"
- depends on MFD_ALTERA_A10SR
+ depends on MFD_ALTERA_A10SR || COMPILE_TEST
help
This option enables support for the external reset functions for
peripheral PHYs on the Altera Arria10 System Resource Chip.
@@ -200,8 +200,9 @@ config RESET_SCMI
firmware controlling all the reset signals.
config RESET_SIMPLE
- bool "Simple Reset Controller Driver" if COMPILE_TEST
+ bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
default ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that

can be asserted and deasserted by toggling bits in a contiguous,
@@ -265,6 +266,14 @@ config RESET_TI_SYSCON
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_TI_TPS380X
+ tristate "TI TPS380x Reset Driver"
+ select GPIOLIB
+ help
+ This enables the reset driver support for TI TPS380x devices. If
+ you wish to use the reset framework for such devices, say Y here.
+ Otherwise, say N.
+
config RESET_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD reset controller"
depends on MFD_TN48M_CPLD || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 3ff378f43348..cd5cf8e7c6a7 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o
obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
diff --git a/drivers/reset/reset-tps380x.c b/drivers/reset/reset-tps380x.c
new file mode 100644
index 000000000000..09d511f069ba
--- /dev/null
+++ b/drivers/reset/reset-tps380x.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TI TPS380x Supply Voltage Supervisor and Reset Controller Driver
+ *
+ * Copyright (C) 2022 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ *
+ * Based on Simple Reset Controller Driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset-controller.h>
+
+struct tps380x_reset {
+ struct reset_controller_dev rcdev;
+ struct gpio_desc *reset_gpio;
+ unsigned int reset_ms;
+};
+
+struct tps380x_reset_devdata {
+ unsigned int min_reset_ms;
+ unsigned int typ_reset_ms;
+ unsigned int max_reset_ms;
+};
+
+static inline
+struct tps380x_reset *to_tps380x_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct tps380x_reset, rcdev);
+}
+
+static int
+tps380x_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 1);
+
+ return 0;
+}
+
+static int
+tps380x_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 0);
+ msleep(tps380x->reset_ms);
+
+ return 0;
+}
+
+static const struct reset_control_ops reset_tps380x_ops = {
+ .assert = tps380x_reset_assert,
+ .deassert = tps380x_reset_deassert,
+};
+
+static int tps380x_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ /* No special handling needed, we have only one reset line per device */
+ return 0;
+}
+
+static int tps380x_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct tps380x_reset_devdata *devdata;
+ struct tps380x_reset *tps380x;
+
+ devdata = device_get_match_data(dev);
+ if (!devdata)
+ return -EINVAL;
+
+ tps380x = devm_kzalloc(dev, sizeof(*tps380x), GFP_KERNEL);
+ if (!tps380x)
+ return -ENOMEM;
+
+ tps380x->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tps380x->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(tps380x->reset_gpio),
+ "Failed to get GPIO\n");
+
+ tps380x->reset_ms = devdata->max_reset_ms;
+
+ tps380x->rcdev.ops = &reset_tps380x_ops;
+ tps380x->rcdev.owner = THIS_MODULE;
+ tps380x->rcdev.dev = dev;
+ tps380x->rcdev.of_node = dev->of_node;
+ tps380x->rcdev.of_reset_n_cells = 0;
+ tps380x->rcdev.of_xlate = tps380x_reset_of_xlate;
+ tps380x->rcdev.nr_resets = 1;
+
+ return devm_reset_controller_register(dev, &tps380x->rcdev);
+}
+
+static const struct tps380x_reset_devdata tps3801_reset_data = {
+ .min_reset_ms = 120,
+ .typ_reset_ms = 200,
+ .max_reset_ms = 280,
+};
+
+static const struct of_device_id tps380x_reset_dt_ids[] = {
+ { .compatible = "ti,tps3801", .data = &tps3801_reset_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tps380x_reset_dt_ids);
+
+static struct platform_driver tps380x_reset_driver = {
+ .probe = tps380x_reset_probe,
+ .driver = {
+ .name = "tps380x-reset",
+ .of_match_table = tps380x_reset_dt_ids,
+ },
+};
+module_platform_driver(tps380x_reset_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("TI TPS380x Supply Voltage Supervisor and Reset Driver");
+MODULE_LICENSE("GPL v2");
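Since the controller registers with of_reset_n_cells = 0 and an xlate that always maps to line 0, a device-tree consumer references it with a bare phandle and drives it through the generic reset API. A hedged consumer-side sketch (names are illustrative, not part of this patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_bring_up(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* deassert sleeps for the supervisor's worst-case reset time */
	return reset_control_deassert(rst);
}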
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 5b4404b8be4c..d1213c33da20 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -234,7 +234,9 @@ static void mtk_register_device_work_function(struct work_struct *register_work)
if (info->registered)
continue;
+ mutex_unlock(&subdev->channels_lock);
ret = mtk_rpmsg_register_device(subdev, &info->info);
+ mutex_lock(&subdev->channels_lock);
if (ret) {
dev_err(&pdev->dev, "Can't create rpmsg_device\n");
continue;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 07586514991f..115c0a1eddb1 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -98,8 +98,6 @@ struct glink_core_rx_intent {
struct qcom_glink {
struct device *dev;
- const char *name;
-
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
@@ -1546,7 +1544,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
cancel_work_sync(&channel->intent_work);
if (channel->rpdev) {
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
@@ -1674,7 +1672,7 @@ static ssize_t rpmsg_name_show(struct device *dev,
if (ret < 0)
name = dev->of_node->name;
- return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);
@@ -1755,10 +1753,6 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
if (ret)
dev_err(dev, "failed to add groups\n");
- ret = of_property_read_string(dev->of_node, "label", &glink->name);
- if (ret < 0)
- glink->name = dev->of_node->name;
-
glink->mbox_client.dev = dev;
glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
diff --git a/drivers/rpmsg/qcom_glink_ssr.c b/drivers/rpmsg/qcom_glink_ssr.c
index dea929c6045d..776d64446879 100644
--- a/drivers/rpmsg/qcom_glink_ssr.c
+++ b/drivers/rpmsg/qcom_glink_ssr.c
@@ -39,7 +39,7 @@ struct cleanup_done_msg {
__le32 seq_num;
};
-/**
+/*
* G-Link SSR protocol commands
*/
#define GLINK_SSR_DO_CLEANUP 0
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 1957b27c4cf3..1044cf03c542 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -729,11 +729,11 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
}
/**
- * qcom_smd_send - write data to smd channel
+ * __qcom_smd_send - write data to smd channel
* @channel: channel handle
* @data: buffer of data to write
* @len: number of bytes to write
- * @wait: flag to indicate if write has ca wait
+ * @wait: flag to indicate if write can wait
*
* This is a blocking write of len bytes into the channel's tx ring buffer and
* signal the remote end. It will sleep until there is enough space available
@@ -1089,7 +1089,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
- strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
@@ -1323,7 +1323,7 @@ static void qcom_channel_state_worker(struct work_struct *work)
spin_unlock_irqrestore(&edge->channels_lock, flags);
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
rpmsg_unregister_device(&edge->dev, &chinfo);
@@ -1383,6 +1383,7 @@ static int qcom_smd_parse_edge(struct device *dev,
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
goto put_node;
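The strncpy() to strscpy_pad() conversions in this file close a classic gap: strncpy() does not guarantee NUL termination when the source fills the buffer, whereas strscpy_pad() always terminates, zero-fills the tail, and reports truncation as -E2BIG. One-for-one in use:

#include <linux/printk.h>
#include <linux/string.h>

static void fill_channel_name(char *dst, size_t dst_len, const char *src)
{
	/* always NUL-terminated; remaining bytes are zeroed */
	if (strscpy_pad(dst, src, dst_len) < 0)
		pr_debug("channel name truncated\n");
}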
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index b6183d4f62a2..4f2189111494 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -120,8 +120,11 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
struct rpmsg_device *rpdev = eptdev->rpdev;
struct device *dev = &eptdev->dev;
- if (eptdev->ept)
+ mutex_lock(&eptdev->ept_lock);
+ if (eptdev->ept) {
+ mutex_unlock(&eptdev->ept_lock);
return -EBUSY;
+ }
get_device(dev);
@@ -137,11 +140,13 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
if (!ept) {
dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
put_device(dev);
+ mutex_unlock(&eptdev->ept_lock);
return -EINVAL;
}
eptdev->ept = ept;
filp->private_data = eptdev;
+ mutex_unlock(&eptdev->ept_lock);
return 0;
}
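The open path above had a time-of-check/time-of-use race: two concurrent opens could both observe eptdev->ept == NULL and both create an endpoint. Holding ept_lock across the check, the creation, and the assignment makes the sequence atomic. Distilled (create_endpoint() is a hypothetical stand-in):

#include <linux/mutex.h>

void *create_endpoint(void);	/* hypothetical helper, defined elsewhere */

struct ept_dev {
	struct mutex lock;
	void *ept;	/* non-NULL once opened */
};

static int ept_open(struct ept_dev *dev)
{
	int ret = 0;

	mutex_lock(&dev->lock);
	if (dev->ept) {
		ret = -EBUSY;			/* already open, seen under the lock */
	} else {
		dev->ept = create_endpoint();
		if (!dev->ept)
			ret = -EINVAL;
	}
	mutex_unlock(&dev->lock);

	return ret;
}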
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 290c1f02da10..d6dde00efdae 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -604,7 +604,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
int ret;
if (driver_override)
- strcpy(rpdev->id.name, driver_override);
+ strscpy_pad(rpdev->id.name, driver_override, RPMSG_NAME_SIZE);
dev_set_name(dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
@@ -618,6 +618,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
strlen(driver_override));
if (ret) {
dev_err(dev, "device_set_override failed: %d\n", ret);
+ put_device(dev);
return ret;
}
}
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index a22cd4abe7d1..39b646d0d40d 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -41,8 +41,8 @@ struct rpmsg_device_ops {
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
- int (*announce_create)(struct rpmsg_device *ept);
- int (*announce_destroy)(struct rpmsg_device *ept);
+ int (*announce_create)(struct rpmsg_device *rpdev);
+ int (*announce_destroy)(struct rpmsg_device *rpdev);
};
/**
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a00f901b5c1d..b8de25118ad0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -383,6 +383,16 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
+config RTC_DRV_NCT3018Y
+ tristate "Nuvoton NCT3018Y"
+ depends on OF
+ help
+ If you say yes here you get support for the Nuvoton NCT3018Y I2C RTC
+ chip.
+
+ This driver can also be built as a module, if so, the module will be
+ called "rtc-nct3018y".
+
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK808
@@ -1478,16 +1488,6 @@ config RTC_DRV_SUNPLUS
This driver can also be built as a module. If so, the module
will be called rtc-sunplus.
-config RTC_DRV_VR41XX
- tristate "NEC VR41XX"
- depends on CPU_VR41XX || COMPILE_TEST
- help
- If you say Y here you will get access to the real time clock
- built into your NEC VR41XX CPU.
-
- To compile this driver as a module, choose M here: the
- module will be called rtc-vr41xx.
-
config RTC_DRV_PL030
tristate "ARM AMBA PL030 RTC"
depends on ARM_AMBA
@@ -1929,6 +1929,17 @@ config RTC_DRV_ASPEED
This driver can also be built as a module, if so, the module
will be called "rtc-aspeed".
+config RTC_DRV_TI_K3
+ tristate "TI K3 RTC"
+ depends on ARCH_K3 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments
+ Real Time Clock for K3 architecture.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-ti-k3".
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
@@ -1973,4 +1984,14 @@ config RTC_DRV_MSC313
This driver can also be built as a module, if so, the module
will be called "rtc-msc313".
+config RTC_DRV_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC built-in RTC"
+ depends on SOC_MICROCHIP_POLARFIRE
+ help
+ If you say yes here you will get support for the
+ built-in RTC on PolarFire SoC.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-mpfs".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index fb04467b652d..aab22bc63432 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o
obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
+obj-$(CONFIG_RTC_DRV_NCT3018Y) += rtc-nct3018y.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
@@ -130,6 +131,7 @@ obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
+obj-$(CONFIG_RTC_DRV_POLARFIRE_SOC) += rtc-mpfs.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
@@ -172,11 +174,11 @@ obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TI_K3) += rtc-ti-k3.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
-obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 3c8eec2218df..e48223c00c67 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -36,7 +36,7 @@ static void rtc_device_release(struct device *dev)
cancel_work_sync(&rtc->irqwork);
- ida_simple_remove(&rtc_ida, rtc->id);
+ ida_free(&rtc_ida, rtc->id);
mutex_destroy(&rtc->ops_lock);
kfree(rtc);
}
@@ -262,7 +262,7 @@ static int rtc_device_get_id(struct device *dev)
}
if (id < 0)
- id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&rtc_ida, GFP_KERNEL);
return id;
}
@@ -368,7 +368,7 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev)
rtc = rtc_allocate_device();
if (!rtc) {
- ida_simple_remove(&rtc_ida, id);
+ ida_free(&rtc_ida, id);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..4aad9bb99868 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -96,7 +96,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
- flush_scheduled_work();
+ flush_work(&rtc->uie_task);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
@@ -566,9 +566,3 @@ void __init rtc_dev_init(void)
if (err < 0)
pr_err("failed to allocate char dev region\n");
}
-
-void __exit rtc_dev_exit(void)
-{
- if (rtc_devt)
- unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
-}
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 6e3e320dc727..f2b0971d2c65 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -817,8 +817,7 @@ static const struct regmap_config abb5zes3_rtc_regmap_config = {
.val_bits = 8,
};
-static int abb5zes3_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abb5zes3_probe(struct i2c_client *client)
{
struct abb5zes3_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -945,7 +944,7 @@ static struct i2c_driver abb5zes3_driver = {
.pm = &abb5zes3_rtc_pm_ops,
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
- .probe = abb5zes3_probe,
+ .probe_new = abb5zes3_probe,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
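This and the RTC conversions that follow all repeat one recipe: the probe callback drops its unused const struct i2c_device_id * argument and the driver hooks it up via .probe_new, the transitional field the I2C core provided while migrating probe() to the one-argument prototype. Schematically:

#include <linux/i2c.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client)
{
	/* no i2c_device_id argument; use match data if it is needed */
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
	.driver = {
		.name	= "example",
	},
	.probe_new	= example_probe,	/* one-argument prototype */
	.id_table	= example_id,
};
module_i2c_driver(example_driver);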
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index e188ab517f1e..2f8deb8c4cd3 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -495,8 +495,7 @@ static void abeoz9_hwmon_register(struct device *dev,
#endif
-static int abeoz9_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abeoz9_probe(struct i2c_client *client)
{
struct abeoz9_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -580,7 +579,7 @@ static struct i2c_driver abeoz9_driver = {
.name = "rtc-ab-eoz9",
.of_match_table = of_match_ptr(abeoz9_dt_match),
},
- .probe = abeoz9_probe,
+ .probe_new = abeoz9_probe,
.id_table = abeoz9_id,
};
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 2235c968842d..e0bbb11d912e 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -249,8 +249,7 @@ static void bq32k_sysfs_unregister(struct device *dev)
device_remove_file(dev, &dev_attr_trickle_charge_bypass);
}
-static int bq32k_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bq32k_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct rtc_device *rtc;
@@ -322,7 +321,7 @@ static struct i2c_driver bq32k_driver = {
.name = "bq32k",
.of_match_table = of_match_ptr(bq32k_of_match),
},
- .probe = bq32k_probe,
+ .probe_new = bq32k_probe,
.remove = bq32k_remove,
.id_table = bq32k_id,
};
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7c006c2b125f..bdb1df843c78 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1260,9 +1260,6 @@ static void use_acpi_alarm_quirks(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return;
-
if (!is_hpet_enabled())
return;
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 0abf98983e13..4b10a1b8f370 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -2,7 +2,6 @@
#ifdef CONFIG_RTC_INTF_DEV
extern void __init rtc_dev_init(void);
-extern void __exit rtc_dev_exit(void);
extern void rtc_dev_prepare(struct rtc_device *rtc);
#else
@@ -11,10 +10,6 @@ static inline void rtc_dev_init(void)
{
}
-static inline void rtc_dev_exit(void)
-{
-}
-
static inline void rtc_dev_prepare(struct rtc_device *rtc)
{
}
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 70626793ca69..887f5193e253 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -375,10 +375,8 @@ static int cros_ec_rtc_remove(struct platform_device *pdev)
ret = blocking_notifier_chain_unregister(
&cros_ec_rtc->cros_ec->event_notifier,
&cros_ec_rtc->notifier);
- if (ret) {
+ if (ret)
dev_err(dev, "failed to unregister notifier\n");
- return ret;
- }
return 0;
}
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 8db5a631bca8..b19de5100b1a 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -467,8 +467,7 @@ static const struct watchdog_ops ds1374_wdt_ops = {
*
*****************************************************************************
*/
-static int ds1374_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1374_probe(struct i2c_client *client)
{
struct ds1374 *ds1374;
int ret;
@@ -575,7 +574,7 @@ static struct i2c_driver ds1374_driver = {
.of_match_table = of_match_ptr(ds1374_of_match),
.pm = &ds1374_pm,
},
- .probe = ds1374_probe,
+ .probe_new = ds1374_probe,
.remove = ds1374_remove,
.id_table = ds1374_id,
};
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 4cd8efbef6cf..a3bb2cd9c881 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -106,8 +106,7 @@ static const struct rtc_class_ops ds1672_rtc_ops = {
.set_time = ds1672_set_time,
};
-static int ds1672_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1672_probe(struct i2c_client *client)
{
int err = 0;
struct rtc_device *rtc;
@@ -150,7 +149,7 @@ static struct i2c_driver ds1672_driver = {
.name = "rtc-ds1672",
.of_match_table = of_match_ptr(ds1672_of_match),
},
- .probe = &ds1672_probe,
+ .probe_new = ds1672_probe,
.id_table = ds1672_id,
};
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 168bc27f1f5a..dd31a60c1fc6 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -566,8 +566,7 @@ static const struct dev_pm_ops ds3232_pm_ops = {
#if IS_ENABLED(CONFIG_I2C)
-static int ds3232_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds3232_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
static const struct regmap_config config = {
@@ -604,7 +603,7 @@ static struct i2c_driver ds3232_driver = {
.of_match_table = of_match_ptr(ds3232_of_match),
.pm = &ds3232_pm_ops,
},
- .probe = ds3232_i2c_probe,
+ .probe_new = ds3232_i2c_probe,
.id_table = ds3232_id,
};
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 9f176bce48ba..53f9f9391a5f 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -111,8 +111,7 @@ static const struct rtc_class_ops em3027_rtc_ops = {
.set_time = em3027_set_time,
};
-static int em3027_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int em3027_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -148,7 +147,7 @@ static struct i2c_driver em3027_driver = {
.name = "rtc-em3027",
.of_match_table = of_match_ptr(em3027_of_match),
},
- .probe = &em3027_probe,
+ .probe_new = em3027_probe,
.id_table = em3027_id,
};
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 677ec2da13d8..f59bb81f23c0 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -340,8 +340,7 @@ static const struct rtc_class_ops fm3130_rtc_ops = {
static struct i2c_driver fm3130_driver;
-static int fm3130_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int fm3130_probe(struct i2c_client *client)
{
struct fm3130 *fm3130;
int err = -ENODEV;
@@ -518,7 +517,7 @@ static struct i2c_driver fm3130_driver = {
.driver = {
.name = "rtc-fm3130",
},
- .probe = fm3130_probe,
+ .probe_new = fm3130_probe,
.id_table = fm3130_id,
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 90e602e99d03..cc710d682121 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -495,8 +495,7 @@ static int hym8563_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume);
-static int hym8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hym8563_probe(struct i2c_client *client)
{
struct hym8563 *hym8563;
int ret;
@@ -572,7 +571,7 @@ static struct i2c_driver hym8563_driver = {
.pm = &hym8563_pm_ops,
.of_match_table = hym8563_dt_idtable,
},
- .probe = hym8563_probe,
+ .probe_new = hym8563_probe,
.id_table = hym8563_id,
};
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 961bd5d1d109..79461ded1a48 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -232,8 +232,7 @@ static const struct rtc_class_ops isl12022_rtc_ops = {
.set_time = isl12022_rtc_set_time,
};
-static int isl12022_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int isl12022_probe(struct i2c_client *client)
{
struct isl12022 *isl12022;
@@ -275,7 +274,7 @@ static struct i2c_driver isl12022_driver = {
.of_match_table = of_match_ptr(isl12022_dt_match),
#endif
},
- .probe = isl12022_probe,
+ .probe_new = isl12022_probe,
.id_table = isl12022_id,
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 182dfa605515..f448a525333e 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -880,10 +880,14 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (rc)
return rc;
- if (client->irq > 0)
+ if (client->irq > 0) {
rc = isl1208_setup_irq(client, client->irq);
- if (rc)
- return rc;
+ if (rc)
+ return rc;
+
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, isl1208->rtc->features);
+ }
if (evdet_irq > 0 && evdet_irq != client->irq)
rc = isl1208_setup_irq(client, evdet_irq);
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 4beadfa41644..0a33851cc51f 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -197,8 +197,7 @@ static const struct rtc_class_ops max6900_rtc_ops = {
.set_time = max6900_rtc_set_time,
};
-static int
-max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int max6900_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -225,7 +224,7 @@ static struct i2c_driver max6900_driver = {
.driver = {
.name = "rtc-max6900",
},
- .probe = max6900_probe,
+ .probe_new = max6900_probe,
.id_table = max6900_id,
};
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 522449b25921..f1c09f1db044 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -21,13 +21,13 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
unsigned long flags;
unsigned char seconds;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 100; i++) {
spin_lock_irqsave(&rtc_lock, flags);
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
- * every msec for completion.
+ * every 100 usec for completion.
*
* Store the second value before checking UIP so a long lasting
* NMI which happens to hit after the UIP check cannot make
@@ -37,7 +37,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
@@ -56,7 +56,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
*/
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
new file mode 100644
index 000000000000..f14d1925e0c9
--- /dev/null
+++ b/drivers/rtc/rtc-mpfs.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip MPFS RTC driver
+ *
+ * Copyright (c) 2021-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ * & Conor Dooley <conor.dooley@microchip.com>
+ */
+#include "linux/bits.h"
+#include "linux/iopoll.h"
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+
+#define CONTROL_REG 0x00
+#define MODE_REG 0x04
+#define PRESCALER_REG 0x08
+#define ALARM_LOWER_REG 0x0c
+#define ALARM_UPPER_REG 0x10
+#define COMPARE_LOWER_REG 0x14
+#define COMPARE_UPPER_REG 0x18
+#define DATETIME_LOWER_REG 0x20
+#define DATETIME_UPPER_REG 0x24
+
+#define CONTROL_RUNNING_BIT BIT(0)
+#define CONTROL_START_BIT BIT(0)
+#define CONTROL_STOP_BIT BIT(1)
+#define CONTROL_ALARM_ON_BIT BIT(2)
+#define CONTROL_ALARM_OFF_BIT BIT(3)
+#define CONTROL_RESET_BIT BIT(4)
+#define CONTROL_UPLOAD_BIT BIT(5)
+#define CONTROL_DOWNLOAD_BIT BIT(6)
+#define CONTROL_MATCH_BIT BIT(7)
+#define CONTROL_WAKEUP_CLR_BIT BIT(8)
+#define CONTROL_WAKEUP_SET_BIT BIT(9)
+#define CONTROL_UPDATED_BIT BIT(10)
+
+#define MODE_CLOCK_CALENDAR BIT(0)
+#define MODE_WAKE_EN BIT(1)
+#define MODE_WAKE_RESET BIT(2)
+#define MODE_WAKE_CONTINUE BIT(3)
+
+#define MAX_PRESCALER_COUNT GENMASK(25, 0)
+#define DATETIME_UPPER_MASK GENMASK(29, 0)
+#define ALARM_UPPER_MASK GENMASK(10, 0)
+
+#define UPLOAD_TIMEOUT_US 50
+
+struct mpfs_rtc_dev {
+ struct rtc_device *rtc;
+ void __iomem *base;
+};
+
+static void mpfs_rtc_start(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+}
+
+static void mpfs_rtc_clear_irq(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 val = readl(rtcdev->base + CONTROL_REG);
+
+ val &= ~(CONTROL_ALARM_ON_BIT | CONTROL_STOP_BIT);
+ val |= CONTROL_ALARM_OFF_BIT;
+ writel(val, rtcdev->base + CONTROL_REG);
+ /*
+ * Ensure that the posted write to the CONTROL_REG register completed before
+ * returning from this function. Not doing this may result in the interrupt
+ * only being cleared some time after this function returns.
+ */
+ (void)readl(rtcdev->base + CONTROL_REG);
+}
+
+static int mpfs_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u64 time;
+
+ time = readl(rtcdev->base + DATETIME_LOWER_REG);
+ time |= ((u64)readl(rtcdev->base + DATETIME_UPPER_REG) & DATETIME_UPPER_MASK) << 32;
+ rtc_time64_to_tm(time, tm);
+
+ return 0;
+}
+
+static int mpfs_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl, prog;
+ u64 time;
+ int ret;
+
+ time = rtc_tm_to_time64(tm);
+
+ writel((u32)time, rtcdev->base + DATETIME_LOWER_REG);
+ writel((u32)(time >> 32) & DATETIME_UPPER_MASK, rtcdev->base + DATETIME_UPPER_REG);
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_UPLOAD_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ ret = read_poll_timeout(readl, prog, prog & CONTROL_UPLOAD_BIT, 0, UPLOAD_TIMEOUT_US,
+ false, rtcdev->base + CONTROL_REG);
+ if (ret) {
+ dev_err(dev, "timed out uploading time to rtc");
+ return ret;
+ }
+ mpfs_rtc_start(rtcdev);
+
+ return 0;
+}
+
+static int mpfs_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode = readl(rtcdev->base + MODE_REG);
+ u64 time;
+
+ alrm->enabled = mode & MODE_WAKE_EN;
+
+ time = (u64)readl(rtcdev->base + ALARM_LOWER_REG) << 32;
+ time |= (readl(rtcdev->base + ALARM_UPPER_REG) & ALARM_UPPER_MASK);
+ rtc_time64_to_tm(time, &alrm->time);
+
+ return 0;
+}
+
+static int mpfs_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode, ctrl;
+ u64 time;
+
+ /* Disable the alarm before updating */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ time = rtc_tm_to_time64(&alrm->time);
+
+ writel((u32)time, rtcdev->base + ALARM_LOWER_REG);
+ writel((u32)(time >> 32) & ALARM_UPPER_MASK, rtcdev->base + ALARM_UPPER_REG);
+
+ /* Bypass compare register in alarm mode */
+ writel(GENMASK(31, 0), rtcdev->base + COMPARE_LOWER_REG);
+ writel(GENMASK(29, 0), rtcdev->base + COMPARE_UPPER_REG);
+
+ /* Configure the RTC to enable the alarm. */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ mode = readl(rtcdev->base + MODE_REG);
+ if (alrm->enabled) {
+ mode = MODE_WAKE_EN | MODE_WAKE_CONTINUE;
+ /* Enable the alarm */
+ ctrl &= ~CONTROL_ALARM_OFF_BIT;
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ }
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+ writel(mode, rtcdev->base + MODE_REG);
+
+ return 0;
+}
+
+static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~(CONTROL_ALARM_ON_BIT | CONTROL_ALARM_OFF_BIT | CONTROL_STOP_BIT);
+
+ if (enabled)
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ else
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ return 0;
+}
+
+static inline struct clk *mpfs_rtc_init_clk(struct device *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, "rtc");
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
+ return clk;
+}
+
+static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev)
+{
+ struct mpfs_rtc_dev *rtcdev = dev;
+
+ mpfs_rtc_clear_irq(rtcdev);
+
+ rtc_update_irq(rtcdev->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops mpfs_rtc_ops = {
+ .read_time = mpfs_rtc_readtime,
+ .set_time = mpfs_rtc_settime,
+ .read_alarm = mpfs_rtc_readalarm,
+ .set_alarm = mpfs_rtc_setalarm,
+ .alarm_irq_enable = mpfs_rtc_alarm_irq_enable,
+};
+
+static int mpfs_rtc_probe(struct platform_device *pdev)
+{
+ struct mpfs_rtc_dev *rtcdev;
+ struct clk *clk;
+ u32 prescaler;
+ int wakeup_irq, ret;
+
+ rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL);
+ if (!rtcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtcdev);
+
+ rtcdev->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdev->rtc))
+ return PTR_ERR(rtcdev->rtc);
+
+ rtcdev->rtc->ops = &mpfs_rtc_ops;
+
+ /* range is capped by alarm max, lower reg is 31:0 & upper is 10:0 */
+ rtcdev->rtc->range_max = GENMASK_ULL(42, 0);
+
+ clk = mpfs_rtc_init_clk(&pdev->dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rtcdev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtcdev->base)) {
+ dev_dbg(&pdev->dev, "invalid ioremap resources\n");
+ return PTR_ERR(rtcdev->base);
+ }
+
+ wakeup_irq = platform_get_irq(pdev, 0);
+ if (wakeup_irq <= 0) {
+ dev_dbg(&pdev->dev, "could not get wakeup irq\n");
+ return wakeup_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, wakeup_irq, mpfs_rtc_wakeup_irq_handler, 0,
+ dev_name(&pdev->dev), rtcdev);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request wakeup irq\n");
+ return ret;
+ }
+
+ /* prescaler hardware adds 1 to reg value */
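+ /* e.g. a 32768 Hz rtcref presumably needs a register value of 32767 to divide down to a 1 Hz tick */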
+ clk = devm_clk_get(&pdev->dev, "rtcref");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ prescaler = clk_get_rate(clk) - 1;
+
+ if (prescaler > MAX_PRESCALER_COUNT) {
+ dev_dbg(&pdev->dev, "invalid prescaler %d\n", prescaler);
+ return -EINVAL;
+ }
+
+ writel(prescaler, rtcdev->base + PRESCALER_REG);
+ dev_info(&pdev->dev, "prescaler set to: 0x%X \r\n", prescaler);
+
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
+
+ return devm_rtc_register_device(rtcdev->rtc);
+}
+
+static int mpfs_rtc_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_rtc_of_match[] = {
+ { .compatible = "microchip,mpfs-rtc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpfs_rtc_of_match);
+
+static struct platform_driver mpfs_rtc_driver = {
+ .probe = mpfs_rtc_probe,
+ .remove = mpfs_rtc_remove,
+ .driver = {
+ .name = "mpfs_rtc",
+ .of_match_table = mpfs_rtc_of_match,
+ },
+};
+
+module_platform_driver(mpfs_rtc_driver);
+
+MODULE_DESCRIPTION("Real time clock for Microchip Polarfire SoC");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
new file mode 100644
index 000000000000..d43acd3920ed
--- /dev/null
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Nuvoton Technology Corporation
+
+#include <linux/bcd.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define NCT3018Y_REG_SC 0x00 /* seconds */
+#define NCT3018Y_REG_SCA 0x01 /* alarm */
+#define NCT3018Y_REG_MN 0x02
+#define NCT3018Y_REG_MNA 0x03 /* alarm */
+#define NCT3018Y_REG_HR 0x04
+#define NCT3018Y_REG_HRA 0x05 /* alarm */
+#define NCT3018Y_REG_DW 0x06
+#define NCT3018Y_REG_DM 0x07
+#define NCT3018Y_REG_MO 0x08
+#define NCT3018Y_REG_YR 0x09
+#define NCT3018Y_REG_CTRL 0x0A /* timer control */
+#define NCT3018Y_REG_ST 0x0B /* status */
+#define NCT3018Y_REG_CLKO 0x0C /* clock out */
+
+#define NCT3018Y_BIT_AF BIT(7)
+#define NCT3018Y_BIT_ST BIT(7)
+#define NCT3018Y_BIT_DM BIT(6)
+#define NCT3018Y_BIT_HF BIT(5)
+#define NCT3018Y_BIT_DSM BIT(4)
+#define NCT3018Y_BIT_AIE BIT(3)
+#define NCT3018Y_BIT_OFIE BIT(2)
+#define NCT3018Y_BIT_CIE BIT(1)
+#define NCT3018Y_BIT_TWO BIT(0)
+
+#define NCT3018Y_REG_BAT_MASK 0x07
+#define NCT3018Y_REG_CLKO_F_MASK 0x03 /* frequency mask */
+#define NCT3018Y_REG_CLKO_CKE 0x80 /* clock out enable */
+
+struct nct3018y {
+ struct rtc_device *rtc;
+ struct i2c_client *client;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
+};
+
+static int nct3018y_set_alarm_mode(struct i2c_client *client, bool on)
+{
+ int err, flags;
+
+ dev_dbg(&client->dev, "%s:on:%d\n", __func__, on);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_CTRL\n");
+ return flags;
+ }
+
+ if (on)
+ flags |= NCT3018Y_BIT_AIE;
+ else
+ flags &= ~NCT3018Y_BIT_AIE;
+
+ flags |= NCT3018Y_BIT_CIE;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_ST\n");
+ return flags;
+ }
+
+ flags &= ~(NCT3018Y_BIT_AF);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_ST\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *alarm_enable,
+ unsigned char *alarm_flag)
+{
+ int flags;
+
+ if (alarm_enable) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_CTRL\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0)
+ return flags;
+ *alarm_enable = flags & NCT3018Y_BIT_AIE;
+ }
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_ST\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0)
+ return flags;
+ *alarm_flag = flags & NCT3018Y_BIT_AF;
+ }
+
+ dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
+ __func__, *alarm_enable, *alarm_flag);
+
+ return 0;
+}
+
+static irqreturn_t nct3018y_irq(int irq, void *dev_id)
+{
+ struct nct3018y *nct3018y = i2c_get_clientdata(dev_id);
+ struct i2c_client *client = nct3018y->client;
+ int err;
+ unsigned char alarm_flag;
+ unsigned char alarm_enable;
+
+ dev_dbg(&client->dev, "%s:irq:%d\n", __func__, irq);
+ err = nct3018y_get_alarm_mode(nct3018y->client, &alarm_enable, &alarm_flag);
+ if (err)
+ return IRQ_NONE;
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:alarm flag:%x\n",
+ __func__, alarm_flag);
+ rtc_update_irq(nct3018y->rtc, 1, RTC_IRQF | RTC_AF);
+ nct3018y_set_alarm_mode(nct3018y->client, 0);
+ dev_dbg(&client->dev, "%s:IRQ_HANDLED\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * In the routines that deal directly with the nct3018y hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
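+ * For example, March 2022 is tm_mon == 2 and tm_year == 122 (years since 1900).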
+ */
+static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[10];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_ST, 1, buf);
+ if (err < 0)
+ return err;
+
+ if (!buf[0]) {
+ dev_dbg(&client->dev, " voltage <=1.7, date/time is not reliable.\n");
+ return -EINVAL;
+ }
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SC, sizeof(buf), buf);
+ if (err < 0)
+ return err;
+
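+ /* time registers interleave with the alarm registers, hence buf[0]/buf[2]/buf[4] for sec/min/hour */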
+ tm->tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[4] & 0x3F);
+ tm->tm_wday = buf[6] & 0x07;
+ tm->tm_mday = bcd2bin(buf[7] & 0x3F);
+ tm->tm_mon = bcd2bin(buf[8] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[9]) + 100;
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[4] = {0};
+ int err;
+
+ buf[0] = bin2bcd(tm->tm_sec);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SC\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_min);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MN, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MN\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_hour);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HR, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HR\n");
+ return err;
+ }
+
+ buf[0] = tm->tm_wday & 0x07;
+ buf[1] = bin2bcd(tm->tm_mday);
+ buf[2] = bin2bcd(tm->tm_mon + 1);
+ buf[3] = bin2bcd(tm->tm_year - 100);
+ err = i2c_smbus_write_i2c_block_data(client, NCT3018Y_REG_DW,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write for day and mon and year\n");
+ return -EIO;
+ }
+
+ return err;
+}
+
+static int nct3018y_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[5];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SCA,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ dev_dbg(&client->dev, "%s: raw data is sec=%02x, min=%02x hr=%02x\n",
+ __func__, buf[0], buf[2], buf[4]);
+
+ tm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->time.tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->time.tm_hour = bcd2bin(buf[4] & 0x3F);
+
+ err = nct3018y_get_alarm_mode(client, &tm->enabled, &tm->pending);
+ if (err < 0)
+ return err;
+
+ dev_dbg(&client->dev, "%s:s=%d m=%d, hr=%d, enabled=%d, pending=%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min,
+ tm->time.tm_hour, tm->enabled, tm->pending);
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+
+ dev_dbg(dev, "%s, sec=%d, min=%d hour=%d tm->enabled:%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min, tm->time.tm_hour,
+ tm->enabled);
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SCA, bin2bcd(tm->time.tm_sec));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SCA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MNA, bin2bcd(tm->time.tm_min));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MNA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HRA, bin2bcd(tm->time.tm_hour));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HRA\n");
+ return err;
+ }
+
+ return nct3018y_set_alarm_mode(client, tm->enabled);
+}
+
+static int nct3018y_irq_enable(struct device *dev, unsigned int enabled)
+{
+ dev_dbg(dev, "%s: alarm enable=%d\n", __func__, enabled);
+
+ return nct3018y_set_alarm_mode(to_i2c_client(dev), enabled);
+}
+
+static int nct3018y_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int status, flags = 0;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ status = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (status < 0)
+ return status;
+
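+ /* all-zero battery-voltage bits indicate the time may be invalid, as in read_time */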
+ if (!(status & NCT3018Y_REG_BAT_MASK))
+ flags |= RTC_VL_DATA_INVALID;
+
+ return put_user(flags, (unsigned int __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMMON_CLK
+/*
+ * Handling of the clkout
+ */
+
+#define clkout_hw_to_nct3018y(_hw) container_of(_hw, struct nct3018y, clkout_hw)
+
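+/* Output rates selectable via the 2-bit NCT3018Y_REG_CLKO_F_MASK field; the array index equals the field value */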
+static const int clkout_rates[] = {
+ 32768,
+ 1024,
+ 32,
+ 1,
+};
+
+static unsigned long nct3018y_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return 0;
+
+ flags &= NCT3018Y_REG_CLKO_F_MASK;
+ return clkout_rates[flags];
+}
+
+static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
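+ /* clkout_rates is sorted in descending order, so this picks the highest supported rate <= rate */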
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int i, flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate) {
+ flags &= ~NCT3018Y_REG_CLKO_F_MASK;
+ flags |= i;
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+ }
+
+ return -EINVAL;
+}
+
+static int nct3018y_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ if (enable)
+ flags |= NCT3018Y_REG_CLKO_CKE;
+ else
+ flags &= ~NCT3018Y_REG_CLKO_CKE;
+
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+}
+
+static int nct3018y_clkout_prepare(struct clk_hw *hw)
+{
+ return nct3018y_clkout_control(hw, 1);
+}
+
+static void nct3018y_clkout_unprepare(struct clk_hw *hw)
+{
+ nct3018y_clkout_control(hw, 0);
+}
+
+static int nct3018y_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ return flags & NCT3018Y_REG_CLKO_CKE;
+}
+
+static const struct clk_ops nct3018y_clkout_ops = {
+ .prepare = nct3018y_clkout_prepare,
+ .unprepare = nct3018y_clkout_unprepare,
+ .is_prepared = nct3018y_clkout_is_prepared,
+ .recalc_rate = nct3018y_clkout_recalc_rate,
+ .round_rate = nct3018y_clkout_round_rate,
+ .set_rate = nct3018y_clkout_set_rate,
+};
+
+static struct clk *nct3018y_clkout_register_clk(struct nct3018y *nct3018y)
+{
+ struct i2c_client *client = nct3018y->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = "nct3018y-clkout";
+ init.ops = &nct3018y_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ nct3018y->clkout_hw.init = &init;
+
+ /* optional override of the clockname */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&client->dev, &nct3018y->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
+static const struct rtc_class_ops nct3018y_rtc_ops = {
+ .read_time = nct3018y_rtc_read_time,
+ .set_time = nct3018y_rtc_set_time,
+ .read_alarm = nct3018y_rtc_read_alarm,
+ .set_alarm = nct3018y_rtc_set_alarm,
+ .alarm_irq_enable = nct3018y_irq_enable,
+ .ioctl = nct3018y_ioctl,
+};
+
+static int nct3018y_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct nct3018y *nct3018y;
+ int err, flags;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ nct3018y = devm_kzalloc(&client->dev, sizeof(struct nct3018y),
+ GFP_KERNEL);
+ if (!nct3018y)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, nct3018y);
+ nct3018y->client = client;
+ device_set_wakeup_capable(&client->dev, 1);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev, "%s: read error\n", __func__);
+ return flags;
+ } else if (flags & NCT3018Y_BIT_TWO) {
+ dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__);
+ }
+
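+ /* TWO is assumed to select 24-hour mode; note this write clears all other control bits */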
+ flags = NCT3018Y_BIT_TWO;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = 0;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "%s: write error\n", __func__);
+ return err;
+ }
+
+ nct3018y->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(nct3018y->rtc))
+ return PTR_ERR(nct3018y->rtc);
+
+ nct3018y->rtc->ops = &nct3018y_rtc_ops;
+ nct3018y->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ nct3018y->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ if (client->irq > 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, nct3018y_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ "nct3018y", client);
+ if (err) {
+ dev_dbg(&client->dev, "unable to request IRQ %d\n", client->irq);
+ return err;
+ }
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, nct3018y->rtc->features);
+ clear_bit(RTC_FEATURE_ALARM, nct3018y->rtc->features);
+ }
+
+#ifdef CONFIG_COMMON_CLK
+ /* register clk in common clk framework */
+ nct3018y_clkout_register_clk(nct3018y);
+#endif
+
+ return devm_rtc_register_device(nct3018y->rtc);
+}
+
+static const struct i2c_device_id nct3018y_id[] = {
+ { "nct3018y", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nct3018y_id);
+
+static const struct of_device_id nct3018y_of_match[] = {
+ { .compatible = "nuvoton,nct3018y" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, nct3018y_of_match);
+
+static struct i2c_driver nct3018y_driver = {
+ .driver = {
+ .name = "rtc-nct3018y",
+ .of_match_table = of_match_ptr(nct3018y_of_match),
+ },
+ .probe = nct3018y_probe,
+ .id_table = nct3018y_id,
+};
+
+module_i2c_driver(nct3018y_driver);
+
+MODULE_AUTHOR("Medad CChien <ctcchien@nuvoton.com>");
+MODULE_AUTHOR("Mia Lin <mimi05633@gmail.com>");
+MODULE_DESCRIPTION("Nuvoton NCT3018Y RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index b1b1943de844..6174b3fd4b98 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -390,8 +390,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x13,
};
-static int pcf8523_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8523_probe(struct i2c_client *client)
{
struct pcf8523 *pcf8523;
struct rtc_device *rtc;
@@ -485,7 +484,7 @@ static struct i2c_driver pcf8523_driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
},
- .probe = pcf8523_probe,
+ .probe_new = pcf8523_probe,
.id_table = pcf8523_id,
};
module_i2c_driver(pcf8523_driver);
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index bb3e9ba75f6c..c05b722f0060 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -350,8 +350,7 @@ static const struct pcf85x63_config pcf_85363_config = {
.num_nvram = 2
};
-static int pcf85363_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf85363_probe(struct i2c_client *client)
{
struct pcf85363 *pcf85363;
const struct pcf85x63_config *config = &pcf_85363_config;
@@ -436,7 +435,7 @@ static struct i2c_driver pcf85363_driver = {
.name = "pcf85363",
.of_match_table = of_match_ptr(dev_ids),
},
- .probe = pcf85363_probe,
+ .probe_new = pcf85363_probe,
};
module_i2c_driver(pcf85363_driver);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 9d06813e2e6d..11fa9788558b 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -509,8 +509,7 @@ static const struct rtc_class_ops pcf8563_rtc_ops = {
.alarm_irq_enable = pcf8563_irq_enable,
};
-static int pcf8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8563_probe(struct i2c_client *client)
{
struct pcf8563 *pcf8563;
int err;
@@ -606,7 +605,7 @@ static struct i2c_driver pcf8563_driver = {
.name = "rtc-pcf8563",
.of_match_table = of_match_ptr(pcf8563_of_match),
},
- .probe = pcf8563_probe,
+ .probe_new = pcf8563_probe,
.id_table = pcf8563_id,
};
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index c80ca20e5d8d..87074d178274 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -275,8 +275,7 @@ static const struct rtc_class_ops pcf8583_rtc_ops = {
.set_time = pcf8583_rtc_set_time,
};
-static int pcf8583_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8583_probe(struct i2c_client *client)
{
struct pcf8583 *pcf8583;
@@ -307,7 +306,7 @@ static struct i2c_driver pcf8583_driver = {
.driver = {
.name = "pcf8583",
},
- .probe = pcf8583_probe,
+ .probe_new = pcf8583_probe,
.id_table = pcf8583_id,
};
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 8cb84c9595fc..eb483a30bd92 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -784,8 +784,7 @@ static const struct regmap_config config = {
#if IS_ENABLED(CONFIG_I2C)
-static int rv3029_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rv3029_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
@@ -819,7 +818,7 @@ static struct i2c_driver rv3029_driver = {
.name = "rv3029",
.of_match_table = of_match_ptr(rv3029_of_match),
},
- .probe = rv3029_i2c_probe,
+ .probe_new = rv3029_i2c_probe,
.id_table = rv3029_id,
};
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index f69e0b1137cd..3527a0521e9b 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -9,6 +9,7 @@
#include <linux/bcd.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/log2.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#define RV8803_EXT 0x0D
#define RV8803_FLAG 0x0E
#define RV8803_CTRL 0x0F
+#define RV8803_OSC_OFFSET 0x2C
#define RV8803_EXT_WADA BIT(6)
@@ -49,12 +51,15 @@
#define RV8803_CTRL_TIE BIT(4)
#define RV8803_CTRL_UIE BIT(5)
+#define RX8803_CTRL_CSEL GENMASK(7, 6)
+
#define RX8900_BACKUP_CTRL 0x18
#define RX8900_FLAG_SWOFF BIT(2)
#define RX8900_FLAG_VDETOFF BIT(3)
enum rv8803_type {
rv_8803,
+ rx_8803,
rx_8804,
rx_8900
};
@@ -64,6 +69,7 @@ struct rv8803_data {
struct rtc_device *rtc;
struct mutex flags_lock;
u8 ctrl;
+ u8 backup;
enum rv8803_type type;
};
@@ -136,6 +142,44 @@ static int rv8803_write_regs(const struct i2c_client *client,
return ret;
}
+static int rv8803_regs_init(struct rv8803_data *rv8803)
+{
+ int ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_OSC_OFFSET, 0x00);
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ FIELD_PREP(RX8803_CTRL_CSEL, 1)); /* 2s */
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_regs(rv8803->client, RV8803_ALARM_MIN, 3,
+ (u8[]){ 0, 0, 0 });
+ if (ret)
+ return ret;
+
+ return rv8803_write_reg(rv8803->client, RV8803_RAM, 0x00);
+}
+
+static int rv8803_regs_configure(struct rv8803_data *rv8803);
+
+static int rv8803_regs_reset(struct rv8803_data *rv8803)
+{
+ /*
+ * The RV-8803 resets all registers to POR defaults after voltage-loss,
+ * the Epson RTCs don't, so we manually reset the remainder here.
+ */
+ if (rv8803->type == rx_8803 || rv8803->type == rx_8900) {
+ int ret = rv8803_regs_init(rv8803);
+ if (ret)
+ return ret;
+ }
+
+ return rv8803_regs_configure(rv8803);
+}
+
static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
@@ -269,6 +313,14 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
return flags;
}
+ if (flags & RV8803_FLAG_V2F) {
+ ret = rv8803_regs_reset(rv8803);
+ if (ret) {
+ mutex_unlock(&rv8803->flags_lock);
+ return ret;
+ }
+ }
+
ret = rv8803_write_reg(rv8803->client, RV8803_FLAG,
flags & ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F));
@@ -498,18 +550,32 @@ static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
if (err < 0)
return err;
- flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
-
- if (of_property_read_bool(node, "epson,vdet-disable"))
- flags |= RX8900_FLAG_VDETOFF;
-
- if (of_property_read_bool(node, "trickle-diode-disable"))
- flags |= RX8900_FLAG_SWOFF;
+ flags = (u8)err;
+ flags &= ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF);
+ flags |= rv8803->backup;
return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
flags);
}
+/* Configure registers with values different from the Power-On reset defaults */
+static int rv8803_regs_configure(struct rv8803_data *rv8803)
+{
+ int err;
+
+ err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
+ if (err)
+ return err;
+
+ err = rx8900_trickle_charger_init(rv8803);
+ if (err) {
+ dev_err(&rv8803->client->dev, "failed to init charger\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int rv8803_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -576,15 +642,15 @@ static int rv8803_probe(struct i2c_client *client,
if (!client->irq)
clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
- err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
- if (err)
- return err;
+ if (of_property_read_bool(client->dev.of_node, "epson,vdet-disable"))
+ rv8803->backup |= RX8900_FLAG_VDETOFF;
- err = rx8900_trickle_charger_init(rv8803);
- if (err) {
- dev_err(&client->dev, "failed to init charger\n");
+ if (of_property_read_bool(client->dev.of_node, "trickle-diode-disable"))
+ rv8803->backup |= RX8900_FLAG_SWOFF;
+
+ err = rv8803_regs_configure(rv8803);
+ if (err)
return err;
- }
rv8803->rtc->ops = &rv8803_rtc_ops;
rv8803->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
@@ -603,7 +669,7 @@ static int rv8803_probe(struct i2c_client *client,
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
{ "rv8804", rx_8804 },
- { "rx8803", rv_8803 },
+ { "rx8803", rx_8803 },
{ "rx8900", rx_8900 },
{ }
};
@@ -616,7 +682,7 @@ static const __maybe_unused struct of_device_id rv8803_of_match[] = {
},
{
.compatible = "epson,rx8803",
- .data = (void *)rv_8803
+ .data = (void *)rx_8803
},
{
.compatible = "epson,rx8804",
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 758fd6e11a15..cc634558b928 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -419,8 +419,7 @@ static struct regmap_config regmap_i2c_config = {
.read_flag_mask = 0x80,
};
-static int rx6110_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx6110_i2c_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct rx6110_data *rx6110;
@@ -464,7 +463,7 @@ static struct i2c_driver rx6110_i2c_driver = {
.name = RX6110_DRIVER_NAME,
.acpi_match_table = rx6110_i2c_acpi_match,
},
- .probe = rx6110_i2c_probe,
+ .probe_new = rx6110_i2c_probe,
.id_table = rx6110_i2c_id,
};
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index b32117ccd74b..dde86f3e2a4b 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -55,6 +55,8 @@
#define RX8025_BIT_CTRL2_XST BIT(5)
#define RX8025_BIT_CTRL2_VDET BIT(6)
+#define RX8035_BIT_HOUR_1224 BIT(7)
+
/* Clock precision adjustment */
#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
#define RX8025_ADJ_DATA_MAX 62
@@ -78,6 +80,7 @@ struct rx8025_data {
struct rtc_device *rtc;
enum rx_model model;
u8 ctrl1;
+ int is_24;
};
static s32 rx8025_read_reg(const struct i2c_client *client, u8 number)
@@ -226,7 +229,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f);
dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f);
else
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12
@@ -254,7 +257,7 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
*/
date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec);
date[RX8025_REG_MIN] = bin2bcd(dt->tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour);
else
date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0)
@@ -279,6 +282,7 @@ static int rx8025_init_client(struct i2c_client *client)
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
u8 ctrl[2], ctrl2;
int need_clear = 0;
+ int hour_reg;
int err;
err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
@@ -303,6 +307,16 @@ static int rx8025_init_client(struct i2c_client *client)
err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
}
+
+ if (rx8025->model == model_rx_8035) {
+ /* In RX-8035, 12/24 flag is in the hour register */
+ hour_reg = rx8025_read_reg(client, RX8025_REG_HOUR);
+ if (hour_reg < 0)
+ return hour_reg;
+ rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224);
+ } else {
+ rx8025->is_24 = (ctrl[1] & RX8025_BIT_CTRL1_1224);
+ }
out:
return err;
}
@@ -329,7 +343,7 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
/* Hardware alarms precision is 1 minute! */
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(ald[0] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
else
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
@@ -350,7 +364,7 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
int err;
ald[0] = bin2bcd(t->time.tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
ald[1] = bin2bcd(t->time.tm_hour);
else
ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index aed4898a0ff4..14edb7534c97 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -248,8 +248,7 @@ static const struct rx85x1_config rx8571_config = {
.num_nvram = 2
};
-static int rx8581_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx8581_probe(struct i2c_client *client)
{
struct rx8581 *rx8581;
const struct rx85x1_config *config = &rx8581_config;
@@ -326,7 +325,7 @@ static struct i2c_driver rx8581_driver = {
.name = "rtc-rx8581",
.of_match_table = of_match_ptr(rx8581_of_match),
},
- .probe = rx8581_probe,
+ .probe_new = rx8581_probe,
.id_table = rx8581_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 26278c770731..81d97b1d3159 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -420,8 +420,7 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
.ioctl = s35390a_rtc_ioctl,
};
-static int s35390a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s35390a_probe(struct i2c_client *client)
{
int err, err_read;
unsigned int i;
@@ -502,7 +501,7 @@ static struct i2c_driver s35390a_driver = {
.name = "rtc-s35390a",
.of_match_table = of_match_ptr(s35390a_of_match),
},
- .probe = s35390a_probe,
+ .probe_new = s35390a_probe,
.id_table = s35390a_id,
};
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index 24e8528e23ec..e2f90d768ca8 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -163,8 +163,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x11,
};
-static int sd3078_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sd3078_probe(struct i2c_client *client)
{
int ret;
struct sd3078 *sd3078;
@@ -218,7 +217,7 @@ static struct i2c_driver sd3078_driver = {
.name = "sd3078",
.of_match_table = of_match_ptr(rtc_dt_match),
},
- .probe = sd3078_probe,
+ .probe_new = sd3078_probe,
.id_table = sd3078_id,
};
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index d4777b01ab22..736fe535cd45 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -388,7 +388,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
config->rtc->ops = &spear_rtc_ops;
config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
- config->rtc->range_min = RTC_TIMESTAMP_END_9999;
+ config->rtc->range_max = RTC_TIMESTAMP_END_9999;
status = devm_rtc_register_device(config->rtc);
if (status)
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 57540727ce1c..ed5516089e9a 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -875,6 +875,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
{ .compatible = "allwinner,sun50i-h6-rtc" },
{ .compatible = "allwinner,sun50i-h616-rtc",
.data = (void *)RTC_LINEAR_DAY },
+ { .compatible = "allwinner,sun50i-r329-rtc",
+ .data = (void *)RTC_LINEAR_DAY },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
new file mode 100644
index 000000000000..7a0f181d3fef
--- /dev/null
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments K3 RTC driver
+ *
+ * Copyright (C) 2021-2022 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+/* Registers */
+#define REG_K3RTC_S_CNT_LSW 0x08
+#define REG_K3RTC_S_CNT_MSW 0x0c
+#define REG_K3RTC_COMP 0x10
+#define REG_K3RTC_ON_OFF_S_CNT_LSW 0x20
+#define REG_K3RTC_ON_OFF_S_CNT_MSW 0x24
+#define REG_K3RTC_SCRATCH0 0x30
+#define REG_K3RTC_SCRATCH7 0x4c
+#define REG_K3RTC_GENERAL_CTL 0x50
+#define REG_K3RTC_IRQSTATUS_RAW_SYS 0x54
+#define REG_K3RTC_IRQSTATUS_SYS 0x58
+#define REG_K3RTC_IRQENABLE_SET_SYS 0x5c
+#define REG_K3RTC_IRQENABLE_CLR_SYS 0x60
+#define REG_K3RTC_SYNCPEND 0x68
+#define REG_K3RTC_KICK0 0x70
+#define REG_K3RTC_KICK1 0x74
+
+/* Freeze when lsw is read and unfreeze when msw is read */
+#define K3RTC_CNT_FMODE_S_CNT_VALUE (0x2 << 24)
+
+/* Magic values for lock/unlock */
+#define K3RTC_KICK0_UNLOCK_VALUE 0x83e70b13
+#define K3RTC_KICK1_UNLOCK_VALUE 0x95a4f1e0
+
+/* Multiplier for ppb conversions */
+#define K3RTC_PPB_MULT (1000000000LL)
+/* Min and max values supported with 'offset' interface (swapped sign) */
+#define K3RTC_MIN_OFFSET (-277761)
+#define K3RTC_MAX_OFFSET (277778)
+
+/**
+ * struct ti_k3_rtc_soc_data - Private per-compatible match data for ti-k3-rtc
+ * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327)
+ */
+struct ti_k3_rtc_soc_data {
+ const bool unlock_irq_erratum;
+};
+
+static const struct regmap_config ti_k3_rtc_regmap_config = {
+ .name = "peripheral-registers",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_K3RTC_KICK1,
+};
+
+enum ti_k3_rtc_fields {
+ K3RTC_KICK0,
+ K3RTC_KICK1,
+ K3RTC_S_CNT_LSW,
+ K3RTC_S_CNT_MSW,
+ K3RTC_O32K_OSC_DEP_EN,
+ K3RTC_UNLOCK,
+ K3RTC_CNT_FMODE,
+ K3RTC_PEND,
+ K3RTC_RELOAD_FROM_BBD,
+ K3RTC_COMP,
+
+ K3RTC_ALM_S_CNT_LSW,
+ K3RTC_ALM_S_CNT_MSW,
+ K3RTC_IRQ_STATUS_RAW,
+ K3RTC_IRQ_STATUS,
+ K3RTC_IRQ_ENABLE_SET,
+ K3RTC_IRQ_ENABLE_CLR,
+
+ K3RTC_IRQ_STATUS_ALT,
+ K3RTC_IRQ_ENABLE_CLR_ALT,
+
+ K3_RTC_MAX_FIELDS
+};
+
+static const struct reg_field ti_rtc_reg_fields[] = {
+ [K3RTC_KICK0] = REG_FIELD(REG_K3RTC_KICK0, 0, 31),
+ [K3RTC_KICK1] = REG_FIELD(REG_K3RTC_KICK1, 0, 31),
+ [K3RTC_S_CNT_LSW] = REG_FIELD(REG_K3RTC_S_CNT_LSW, 0, 31),
+ [K3RTC_S_CNT_MSW] = REG_FIELD(REG_K3RTC_S_CNT_MSW, 0, 15),
+ [K3RTC_O32K_OSC_DEP_EN] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 21, 21),
+ [K3RTC_UNLOCK] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 23, 23),
+ [K3RTC_CNT_FMODE] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 24, 25),
+ [K3RTC_PEND] = REG_FIELD(REG_K3RTC_SYNCPEND, 0, 1),
+ [K3RTC_RELOAD_FROM_BBD] = REG_FIELD(REG_K3RTC_SYNCPEND, 31, 31),
+ [K3RTC_COMP] = REG_FIELD(REG_K3RTC_COMP, 0, 31),
+
+ /* We use on to off as alarm trigger */
+ [K3RTC_ALM_S_CNT_LSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_LSW, 0, 31),
+ [K3RTC_ALM_S_CNT_MSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_MSW, 0, 15),
+ [K3RTC_IRQ_STATUS_RAW] = REG_FIELD(REG_K3RTC_IRQSTATUS_RAW_SYS, 0, 0),
+ [K3RTC_IRQ_STATUS] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_SET] = REG_FIELD(REG_K3RTC_IRQENABLE_SET_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_CLR] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 0, 0),
+ /* Off to on is alternate */
+ [K3RTC_IRQ_STATUS_ALT] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 1, 1),
+ [K3RTC_IRQ_ENABLE_CLR_ALT] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 1, 1),
+};
+
+/**
+ * struct ti_k3_rtc - Private data for ti-k3-rtc
+ * @irq: IRQ
+ * @sync_timeout_us: data sync timeout period in microseconds
+ * @rate_32k: 32k clock rate in Hz
+ * @rtc_dev: rtc device
+ * @regmap: rtc mmio regmap
+ * @r_fields: rtc register fields
+ * @soc: SoC compatible match data
+ */
+struct ti_k3_rtc {
+ unsigned int irq;
+ u32 sync_timeout_us;
+ unsigned long rate_32k;
+ struct rtc_device *rtc_dev;
+ struct regmap *regmap;
+ struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
+ const struct ti_k3_rtc_soc_data *soc;
+};
+
+static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
+{
+ int ret;
+ int val;
+
+ ret = regmap_field_read(priv->r_fields[f], &val);
+ /*
+ * We shouldn't be seeing regmap fail on us for mmio reads
+ * This is possible if clock context fails, but that isn't the case for us
+ */
+ if (WARN_ON_ONCE(ret))
+ return ret;
+ return val;
+}
+
+static void k3rtc_field_write(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f, u32 val)
+{
+ regmap_field_write(priv->r_fields[f], val);
+}
+
+/**
+ * k3rtc_fence - Ensure a register sync took place between the two domains
+ * @priv: pointer to priv data
+ *
+ * Return: 0 if the sync took place, else returns -ETIMEDOUT
+ */
+static int k3rtc_fence(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_PEND], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
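+/*
+ * k3rtc_check_unlocked: returns 0 if the RTC is unlocked, 1 if it is
+ * still locked, or a negative error if the register read fails.
+ */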
+static inline int k3rtc_check_unlocked(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_field_read(priv, K3RTC_UNLOCK);
+ if (ret < 0)
+ return ret;
+
+ return (ret) ? 0 : 1;
+}
+
+static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_check_unlocked(priv);
+ if (!ret)
+ return ret;
+
+ k3rtc_field_write(priv, K3RTC_KICK0, K3RTC_KICK0_UNLOCK_VALUE);
+ k3rtc_field_write(priv, K3RTC_KICK1, K3RTC_KICK1_UNLOCK_VALUE);
+
+ /* Skip fence since we are going to check the unlock bit as fence */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
+static int k3rtc_configure(struct device *dev)
+{
+ int ret;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ /*
+ * HWBUG: The compare state machine is broken if the RTC module
+ * is NOT unlocked within one second of boot - which is a pretty
+ * long time from the perspective of a Linux driver (module load,
+ * a u-boot shell, etc. can all take much longer than that).
+ *
+ * If that happens, assume the RTC module is unusable.
+ */
+ if (priv->soc->unlock_irq_erratum) {
+ ret = k3rtc_check_unlocked(priv);
+ /* If there is an error OR if we are locked, return error */
+ if (ret) {
+ dev_err(dev,
+ HW_ERR "Erratum i2327 unlock QUIRK! Cannot operate!!\n");
+ return -EFAULT;
+ }
+ } else {
+ /* May need to explicitly unlock first time */
+ ret = k3rtc_unlock_rtc(priv);
+ if (ret) {
+ dev_err(dev, "Failed to unlock(%d)!\n", ret);
+ return ret;
+ }
+ }
+
+ /* Enable Shadow register sync on 32k clock boundary */
+ k3rtc_field_write(priv, K3RTC_O32K_OSC_DEP_EN, 0x1);
+
+ /*
+ * Wait at least clock sync time before proceeding further programming.
+ * This ensures that the 32k based sync is active.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 5);
+
+ /* Fence to make sure the osc_dep enable has synced through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev,
+ "Failed fence osc_dep enable(%d) - is 32k clk working?!\n", ret);
+ return ret;
+ }
+
+ /*
+ * FMODE setting: Reading lower seconds will freeze value on higher
+ * seconds. This also implies that we must *ALWAYS* read lower seconds
+ * prior to reading higher seconds
+ */
+ k3rtc_field_write(priv, K3RTC_CNT_FMODE, K3RTC_CNT_FMODE_S_CNT_VALUE);
+
+ /* Clear any spurious IRQ sources if any */
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS, 0x1);
+ /* Disable all IRQs */
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR, 0x1);
+
+ /* Sync the writes in */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
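+ /* read LSW first: FMODE freezes the MSW value on the LSW read (see k3rtc_configure) */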
+ seconds_lo = k3rtc_field_read(priv, K3RTC_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, tm);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+
+ seconds = rtc_tm_to_time64(tm);
+
+ /*
+ * Read operation on LSW will freeze the RTC, so to update
+ * the time, we cannot use field operations. Just write since the
+ * reserved bits are ignored.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_LSW, seconds);
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_MSW, seconds >> 32);
+
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ u32 offset = enabled ? K3RTC_IRQ_ENABLE_SET : K3RTC_IRQ_ENABLE_CLR;
+
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+ if ((enabled && reg) || (!enabled && !reg))
+ return 0;
+
+ k3rtc_field_write(priv, offset, 0x1);
+
+ /*
+ * Ensure the write sync is through - NOTE: it should be OK to have
+ * ISR to fire as we are checking sync (which should be done in a 32k
+ * cycle or so).
+ */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, &alarm->time);
+
+ alarm->enabled = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+ int ret;
+
+ seconds = rtc_tm_to_time64(&alarm->time);
+
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_LSW, seconds);
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_MSW, (seconds >> 32));
+
+ /* Make sure the alarm time is synced in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence(%d)! Potential config issue?\n", ret);
+ return ret;
+ }
+
+ /* Alarm IRQ enable will do a sync */
+ return ti_k3_rtc_alarm_irq_enable(dev, alarm->enabled);
+}
+
+static int ti_k3_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ comp = k3rtc_field_read(priv, K3RTC_COMP);
+
+ /* Convert from RTC calibration register format to ppb format */
+ tmp = comp * (s64)K3RTC_PPB_MULT;
+ if (tmp < 0)
+ tmp -= ticks_per_hr / 2LL;
+ else
+ tmp += ticks_per_hr / 2LL;
+ tmp = div_s64(tmp, ticks_per_hr);
+
+ /* Offset value operates in negative way, so swap sign */
+ *offset = (long)-tmp;
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_offset(struct device *dev, long offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ /* Make sure offset value is within supported range */
+ if (offset < K3RTC_MIN_OFFSET || offset > K3RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Convert from ppb format to RTC calibration register format */
+ tmp = offset * (s64)ticks_per_hr;
+ if (tmp < 0)
+ tmp -= K3RTC_PPB_MULT / 2LL;
+ else
+ tmp += K3RTC_PPB_MULT / 2LL;
+ tmp = div_s64(tmp, K3RTC_PPB_MULT);
+
+ /* Offset value operates in negative way, so swap sign */
+ comp = (int)-tmp;
+
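+ /*
+ * Worked example (illustrative): with a 32768 Hz clock, ticks_per_hr is
+ * 117964800, so offset = -10000 ppb rounds to tmp = -1180 and comp = 1180.
+ */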
+ k3rtc_field_write(priv, K3RTC_COMP, comp);
+
+ return k3rtc_fence(priv);
+}
+
+static irqreturn_t ti_k3_rtc_interrupt(s32 irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int ret;
+
+ /*
+ * IRQ assertion can be very fast, however, the IRQ Status clear
+ * de-assert depends on 32k clock edge in the 32k domain
+ * If we clear the status prior to the first 32k clock edge,
+ * the status bit is cleared, but the IRQ stays re-asserted.
+ *
+ * To prevent this condition, we need to wait for clock sync time.
+ * We can either do that by polling the 32k observability signal for
+ * a toggle OR we could just sleep and let the processor do other
+ * stuff.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 2);
+
+ /* Make sure this is a valid interrupt */
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_STATUS);
+
+ if (!reg) {
+ u32 raw = k3rtc_field_read(priv, K3RTC_IRQ_STATUS_RAW);
+
+ dev_err(dev,
+ HW_ERR
+ "Erratum i2327/IRQ trig: status: 0x%08x / 0x%08x\n", reg, raw);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Write 1 to clear status reg
+ * We cannot use a field operation here due to a potential race between
+ * 32k domain and vbus domain.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_IRQSTATUS_SYS, 0x1);
+
+ /* Sync the write in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence irq status clr(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Force the 32k status to be reloaded back in to ensure status is
+ * reflected back correctly.
+ */
+ k3rtc_field_write(priv, K3RTC_RELOAD_FROM_BBD, 0x1);
+
+ /* Ensure the write sync is through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence reload from bbd(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Now we ensure that the status bit is cleared */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_IRQ_STATUS],
+ ret, !ret, 2, priv->sync_timeout_us);
+ if (ret) {
+ dev_err(dev, "Time out waiting for status clear\n");
+ return IRQ_NONE;
+ }
+
+ /* Notify RTC core on event */
+ rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ti_k3_rtc_ops = {
+ .read_time = ti_k3_rtc_read_time,
+ .set_time = ti_k3_rtc_set_time,
+ .read_alarm = ti_k3_rtc_read_alarm,
+ .set_alarm = ti_k3_rtc_set_alarm,
+ .read_offset = ti_k3_rtc_read_offset,
+ .set_offset = ti_k3_rtc_set_offset,
+ .alarm_irq_enable = ti_k3_rtc_alarm_irq_enable,
+};
+
+static int ti_k3_rtc_scratch_read(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+
+ return regmap_bulk_read(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+}
+
+static int ti_k3_rtc_scratch_write(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+ int ret;
+
+ ret = regmap_bulk_write(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+ if (ret)
+ return ret;
+
+ return k3rtc_fence(priv);
+}
+
+static struct nvmem_config ti_k3_rtc_nvmem_config = {
+ .name = "ti_k3_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = REG_K3RTC_SCRATCH7 - REG_K3RTC_SCRATCH0 + 4,
+ .reg_read = ti_k3_rtc_scratch_read,
+ .reg_write = ti_k3_rtc_scratch_write,
+};
+
+static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = devm_clk_get(dev, "osc32k");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
+
+ priv->rate_32k = clk_get_rate(clk);
+
+ /* Warn if we are not running at exactly 32768 Hz; timekeeping may drift */
+ if (priv->rate_32k != 32768)
+ dev_warn(dev, "Clock rate %lu is not 32768! Could misbehave!\n",
+ priv->rate_32k);
+
+ /*
+ * Sync timeout should be two 32k clock sync cycles = ~61 us. We double
+ * it to accommodate intermediate bus segment and cpu frequency
+ * deltas.
+ */
+ priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4);
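+ /* e.g. at 32768 Hz: DIV_ROUND_UP_ULL(1000000, 32768) = 31 us per cycle, times 4 = 124 us */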
+
+ return ret;
+}
+
+static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ /* Note: VBUS isn't a context clock, it is needed for hardware operation */
+ clk = devm_clk_get(dev, "vbus");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+}
+
+static int ti_k3_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_k3_rtc *priv;
+ void __iomem *rtc_base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct ti_k3_rtc), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rtc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc_base))
+ return PTR_ERR(rtc_base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, rtc_base, &ti_k3_rtc_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ ret = devm_regmap_field_bulk_alloc(dev, priv->regmap, priv->r_fields,
+ ti_rtc_reg_fields, K3_RTC_MAX_FIELDS);
+ if (ret)
+ return ret;
+
+ ret = k3rtc_get_32kclk(dev, priv);
+ if (ret)
+ return ret;
+ ret = k3rtc_get_vbusclk(dev, priv);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ priv->irq = (unsigned int)ret;
+
+ priv->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtc_dev))
+ return PTR_ERR(priv->rtc_dev);
+
+ priv->soc = of_device_get_match_data(dev);
+
+ priv->rtc_dev->ops = &ti_k3_rtc_ops;
+ priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48-bit seconds */
+ ti_k3_rtc_nvmem_config.priv = priv;
+
+ ret = devm_request_threaded_irq(dev, priv->irq, NULL,
+ ti_k3_rtc_interrupt,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "Could not request IRQ: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = k3rtc_configure(dev);
+ if (ret)
+ return ret;
+
+ if (device_property_present(dev, "wakeup-source"))
+ device_init_wakeup(dev, true);
+ else
+ device_set_wakeup_capable(dev, true);
+
+ ret = devm_rtc_register_device(priv->rtc_dev);
+ if (ret)
+ return ret;
+
+ return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config);
+}
+
+static const struct ti_k3_rtc_soc_data ti_k3_am62_data = {
+ .unlock_irq_erratum = true,
+};
+
+static const struct of_device_id ti_k3_rtc_of_match_table[] = {
+ {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table);
+
+static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(priv->irq);
+ return 0;
+}
+
+static int __maybe_unused ti_k3_rtc_resume(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(priv->irq);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ti_k3_rtc_pm_ops, ti_k3_rtc_suspend, ti_k3_rtc_resume);
+
+static struct platform_driver ti_k3_rtc_driver = {
+ .probe = ti_k3_rtc_probe,
+ .driver = {
+ .name = "rtc-ti-k3",
+ .of_match_table = ti_k3_rtc_of_match_table,
+ .pm = &ti_k3_rtc_pm_ops,
+ },
+};
+module_platform_driver(ti_k3_rtc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 RTC driver");
+MODULE_AUTHOR("Nishanth Menon");
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
deleted file mode 100644
index 5a9f9ad86d32..000000000000
--- a/drivers/rtc/rtc-vr41xx.c
+++ /dev/null
@@ -1,363 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for NEC VR4100 series Real Time Clock unit.
- *
- * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/compat.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/log2.h>
-
-#include <asm/div64.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
-MODULE_LICENSE("GPL v2");
-
-/* RTC 1 registers */
-#define ETIMELREG 0x00
-#define ETIMEMREG 0x02
-#define ETIMEHREG 0x04
-/* RFU */
-#define ECMPLREG 0x08
-#define ECMPMREG 0x0a
-#define ECMPHREG 0x0c
-/* RFU */
-#define RTCL1LREG 0x10
-#define RTCL1HREG 0x12
-#define RTCL1CNTLREG 0x14
-#define RTCL1CNTHREG 0x16
-#define RTCL2LREG 0x18
-#define RTCL2HREG 0x1a
-#define RTCL2CNTLREG 0x1c
-#define RTCL2CNTHREG 0x1e
-
-/* RTC 2 registers */
-#define TCLKLREG 0x00
-#define TCLKHREG 0x02
-#define TCLKCNTLREG 0x04
-#define TCLKCNTHREG 0x06
-/* RFU */
-#define RTCINTREG 0x1e
- #define TCLOCK_INT 0x08
- #define RTCLONG2_INT 0x04
- #define RTCLONG1_INT 0x02
- #define ELAPSEDTIME_INT 0x01
-
-#define RTC_FREQUENCY 32768
-#define MAX_PERIODIC_RATE 6553
-
-static void __iomem *rtc1_base;
-static void __iomem *rtc2_base;
-
-#define rtc1_read(offset) readw(rtc1_base + (offset))
-#define rtc1_write(offset, value) writew((value), rtc1_base + (offset))
-
-#define rtc2_read(offset) readw(rtc2_base + (offset))
-#define rtc2_write(offset, value) writew((value), rtc2_base + (offset))
-
-/* 32-bit compat for ioctls that nobody else uses */
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, __u32)
-
-static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
-
-static DEFINE_SPINLOCK(rtc_lock);
-static char rtc_name[] = "RTC";
-static unsigned long periodic_count;
-static unsigned int alarm_enabled;
-static int aie_irq;
-static int pie_irq;
-
-static inline time64_t read_elapsed_second(void)
-{
-
- unsigned long first_low, first_mid, first_high;
-
- unsigned long second_low, second_mid, second_high;
-
- do {
- first_low = rtc1_read(ETIMELREG);
- first_mid = rtc1_read(ETIMEMREG);
- first_high = rtc1_read(ETIMEHREG);
- second_low = rtc1_read(ETIMELREG);
- second_mid = rtc1_read(ETIMEMREG);
- second_high = rtc1_read(ETIMEHREG);
- } while (first_low != second_low || first_mid != second_mid ||
- first_high != second_high);
-
- return ((u64)first_high << 17) | (first_mid << 1) | (first_low >> 15);
-}
-
-static inline void write_elapsed_second(time64_t sec)
-{
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ETIMELREG, (uint16_t)(sec << 15));
- rtc1_write(ETIMEMREG, (uint16_t)(sec >> 1));
- rtc1_write(ETIMEHREG, (uint16_t)(sec >> 17));
-
- spin_unlock_irq(&rtc_lock);
-}
-
-static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, elapsed_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- elapsed_sec = read_elapsed_second();
-
- rtc_time64_to_tm(epoch_sec + elapsed_sec, time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, current_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- current_sec = rtc_tm_to_time64(time);
-
- write_elapsed_second(current_sec - epoch_sec);
-
- return 0;
-}
-
-static int vr41xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- unsigned long low, mid, high;
- struct rtc_time *time = &wkalrm->time;
-
- spin_lock_irq(&rtc_lock);
-
- low = rtc1_read(ECMPLREG);
- mid = rtc1_read(ECMPMREG);
- high = rtc1_read(ECMPHREG);
- wkalrm->enabled = alarm_enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- rtc_time64_to_tm((high << 17) | (mid << 1) | (low >> 15), time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- time64_t alarm_sec;
-
- alarm_sec = rtc_tm_to_time64(&wkalrm->time);
-
- spin_lock_irq(&rtc_lock);
-
- if (alarm_enabled)
- disable_irq(aie_irq);
-
- rtc1_write(ECMPLREG, (uint16_t)(alarm_sec << 15));
- rtc1_write(ECMPMREG, (uint16_t)(alarm_sec >> 1));
- rtc1_write(ECMPHREG, (uint16_t)(alarm_sec >> 17));
-
- if (wkalrm->enabled)
- enable_irq(aie_irq);
-
- alarm_enabled = wkalrm->enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case RTC_EPOCH_READ:
- return put_user(epoch, (unsigned long __user *)arg);
-#ifdef CONFIG_64BIT
- case RTC_EPOCH_READ32:
- return put_user(epoch, (unsigned int __user *)arg);
-#endif
- case RTC_EPOCH_SET:
- /* Doesn't support before 1900 */
- if (arg < 1900)
- return -EINVAL;
- epoch = arg;
- break;
- default:
- return -ENOIOCTLCMD;
- }
-
- return 0;
-}
-
-static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- spin_lock_irq(&rtc_lock);
- if (enabled) {
- if (!alarm_enabled) {
- enable_irq(aie_irq);
- alarm_enabled = 1;
- }
- } else {
- if (alarm_enabled) {
- disable_irq(aie_irq);
- alarm_enabled = 0;
- }
- }
- spin_unlock_irq(&rtc_lock);
- return 0;
-}
-
-static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
- rtc2_write(RTCINTREG, ELAPSEDTIME_INT);
-
- rtc_update_irq(rtc, 1, RTC_AF);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
- unsigned long count = periodic_count;
-
- rtc2_write(RTCINTREG, RTCLONG1_INT);
-
- rtc1_write(RTCL1LREG, count);
- rtc1_write(RTCL1HREG, count >> 16);
-
- rtc_update_irq(rtc, 1, RTC_PF);
-
- return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops vr41xx_rtc_ops = {
- .ioctl = vr41xx_rtc_ioctl,
- .read_time = vr41xx_rtc_read_time,
- .set_time = vr41xx_rtc_set_time,
- .read_alarm = vr41xx_rtc_read_alarm,
- .set_alarm = vr41xx_rtc_set_alarm,
- .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
-};
-
-static int rtc_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct rtc_device *rtc;
- int retval;
-
- if (pdev->num_resources != 4)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
-
- rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc1_base)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc2_base) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc)) {
- retval = PTR_ERR(rtc);
- goto err_iounmap_all;
- }
-
- rtc->ops = &vr41xx_rtc_ops;
-
- /* 48-bit counter at 32.768 kHz */
- rtc->range_max = (1ULL << 33) - 1;
- rtc->max_user_freq = MAX_PERIODIC_RATE;
-
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ECMPLREG, 0);
- rtc1_write(ECMPMREG, 0);
- rtc1_write(ECMPHREG, 0);
- rtc1_write(RTCL1LREG, 0);
- rtc1_write(RTCL1HREG, 0);
-
- spin_unlock_irq(&rtc_lock);
-
- aie_irq = platform_get_irq(pdev, 0);
- if (aie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0,
- "elapsed_time", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- pie_irq = platform_get_irq(pdev, 1);
- if (pie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0,
- "rtclong1", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- platform_set_drvdata(pdev, rtc);
-
- disable_irq(aie_irq);
- disable_irq(pie_irq);
-
- dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
-
- retval = devm_rtc_register_device(rtc);
- if (retval)
- goto err_iounmap_all;
-
- return 0;
-
-err_iounmap_all:
- rtc2_base = NULL;
-
-err_rtc1_iounmap:
- rtc1_base = NULL;
-
- return retval;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:RTC");
-
-static struct platform_driver rtc_platform_driver = {
- .probe = rtc_probe,
- .driver = {
- .name = rtc_name,
- },
-};
-
-module_platform_driver(rtc_platform_driver);
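
The deleted vr41xx driver packs a 33-bit seconds value around a 15-bit 32.768 kHz fraction across three 16-bit registers: bit 0 of the seconds lands in bit 15 of the low register, bits 1..16 in the middle register, and bits 17..32 in the high register, which is why range_max is (1ULL << 33) - 1. A standalone sketch of that packing and the matching read-side recombination (register names are illustrative only):

	#include <stdint.h>
	#include <stdio.h>
	#include <assert.h>

	int main(void)
	{
		uint64_t sec = 0x1ABCD1234ULL & ((1ULL << 33) - 1); /* 33-bit seconds */

		/* Write side, as in write_elapsed_second() */
		uint16_t low  = (uint16_t)(sec << 15); /* sec bit 0 + 15 fraction bits */
		uint16_t mid  = (uint16_t)(sec >> 1);  /* sec bits 1..16 */
		uint16_t high = (uint16_t)(sec >> 17); /* sec bits 17..32 */

		/* Read side, as in vr41xx_rtc_read_alarm() */
		uint64_t back = ((uint64_t)high << 17) | ((uint64_t)mid << 1) | (low >> 15);

		assert(back == sec);
		printf("sec=%#llx round-trips\n", (unsigned long long)back);
		return 0;
	}
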
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index d1d5a44d9122..ba0d22a5b421 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -614,8 +614,7 @@ static void x1205_sysfs_unregister(struct device *dev)
}
-static int x1205_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int x1205_probe(struct i2c_client *client)
{
int err = 0;
unsigned char sr;
@@ -681,7 +680,7 @@ static struct i2c_driver x1205_driver = {
.name = "rtc-x1205",
.of_match_table = x1205_dt_ids,
},
- .probe = x1205_probe,
+ .probe_new = x1205_probe,
.remove = x1205_remove,
.id_table = x1205_id,
};
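
The x1205 hunk is part of the i2c subsystem's migration from probe(client, id) to the single-argument probe_new(client). A minimal hedged sketch of a converted driver skeleton (the foo names are placeholders, not from this patch):

	#include <linux/i2c.h>
	#include <linux/module.h>

	static const struct i2c_device_id foo_id[] = {
		{ "foo", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_id);

	static int foo_probe(struct i2c_client *client)
	{
		/*
		 * No i2c_device_id argument anymore; drivers that still need
		 * the id data can look it up with i2c_match_id(foo_id, client).
		 */
		dev_info(&client->dev, "probed\n");
		return 0;
	}

	static struct i2c_driver foo_driver = {
		.driver    = { .name = "foo" },
		.probe_new = foo_probe,		/* was .probe = foo_probe */
		.id_table  = foo_id,
	};
	module_i2c_driver(foo_driver);
	MODULE_LICENSE("GPL");
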
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index f440bb52be92..c9b85c838ebe 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -36,17 +37,23 @@
#define RTC_OSC_EN BIT(24)
#define RTC_BATT_EN BIT(31)
-#define RTC_CALIB_DEF 0x198233
+#define RTC_CALIB_DEF 0x7FFF
#define RTC_CALIB_MASK 0x1FFFFF
#define RTC_ALRM_MASK BIT(1)
#define RTC_MSEC 1000
+#define RTC_FR_MASK 0xF0000
+#define RTC_FR_MAX_TICKS 16
+#define RTC_PPB 1000000000LL
+#define RTC_MIN_OFFSET -32768000
+#define RTC_MAX_OFFSET 32767000
struct xlnx_rtc_dev {
struct rtc_device *rtc;
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- unsigned int calibval;
+ struct clk *rtc_clk;
+ unsigned int freq;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -61,13 +68,6 @@ static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
*/
new_time = rtc_tm_to_time64(tm) + 1;
- /*
- * Writing into calibration register will clear the Tick Counter and
- * force the next second to be signaled exactly in 1 second period
- */
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
-
writel(new_time, xrtcdev->reg_base + RTC_SET_TM_WR);
/*
@@ -173,15 +173,76 @@ static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev)
rtc_ctrl = readl(xrtcdev->reg_base + RTC_CTRL);
rtc_ctrl |= RTC_BATT_EN;
writel(rtc_ctrl, xrtcdev->reg_base + RTC_CTRL);
+}
- /*
- * Based on crystal freq of 33.330 KHz
- * set the seconds counter and enable, set fractions counter
- * to default value suggested as per design spec
- * to correct RTC delay in frequency over period of time.
+static int xlnx_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned int calibval;
+ long offset_val;
+
+ calibval = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ /* Offset with seconds ticks */
+ offset_val = calibval & RTC_TICK_MASK;
+ offset_val = offset_val - RTC_CALIB_DEF;
+ offset_val = offset_val * tick_mult;
+
+ /* Offset with fractional ticks */
+ if (calibval & RTC_FR_EN)
+ offset_val += ((calibval & RTC_FR_MASK) >> RTC_FR_DATSHIFT)
+ * (tick_mult / RTC_FR_MAX_TICKS);
+ *offset = offset_val;
+
+ return 0;
+}
+
+static int xlnx_rtc_set_offset(struct device *dev, long offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned char fract_tick = 0;
+ unsigned int calibval;
+ short int max_tick;
+ int fract_offset;
+
+ if (offset < RTC_MIN_OFFSET || offset > RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Number ticks for given offset */
+ max_tick = div_s64_rem(offset, tick_mult, &fract_offset);
+
+ /* Number fractional ticks for given offset */
+ if (fract_offset) {
+ if (fract_offset < 0) {
+ fract_offset = fract_offset + tick_mult;
+ max_tick--;
+ }
+ if (fract_offset > (tick_mult / RTC_FR_MAX_TICKS)) {
+ for (fract_tick = 1; fract_tick < 16; fract_tick++) {
+ if (fract_offset <=
+ (fract_tick *
+ (tick_mult / RTC_FR_MAX_TICKS)))
+ break;
+ }
+ }
+ }
+
+	/*
+	 * The ZynqMP RTC uses second and fractional tick
+	 * counters for compensation.
*/
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+ calibval = max_tick + RTC_CALIB_DEF;
+
+ if (fract_tick)
+ calibval |= RTC_FR_EN;
+
+ calibval |= (fract_tick << RTC_FR_DATSHIFT);
+
+ writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+
+ return 0;
}
static const struct rtc_class_ops xlnx_rtc_ops = {
@@ -190,6 +251,8 @@ static const struct rtc_class_ops xlnx_rtc_ops = {
.read_alarm = xlnx_rtc_read_alarm,
.set_alarm = xlnx_rtc_set_alarm,
.alarm_irq_enable = xlnx_rtc_alarm_irq_enable,
+ .read_offset = xlnx_rtc_read_offset,
+ .set_offset = xlnx_rtc_set_offset,
};
static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
@@ -255,10 +318,22 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node, "calibration",
- &xrtcdev->calibval);
- if (ret)
- xrtcdev->calibval = RTC_CALIB_DEF;
+	/* Get the optional rtc_clk */
+ xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
+ if (IS_ERR(xrtcdev->rtc_clk)) {
+ if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "Device clock not found.\n");
+ }
+ xrtcdev->freq = clk_get_rate(xrtcdev->rtc_clk);
+ if (!xrtcdev->freq) {
+ ret = of_property_read_u32(pdev->dev.of_node, "calibration",
+ &xrtcdev->freq);
+ if (ret)
+ xrtcdev->freq = RTC_CALIB_DEF;
+ }
+ ret = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ if (!ret)
+ writel(xrtcdev->freq, (xrtcdev->reg_base + RTC_CALIB_WR));
xlnx_init_rtc(xrtcdev);
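
Rough arithmetic behind the new offset callbacks, assuming the intended tick multiplier is the ppb-per-tick quotient 10^9 / freq (a standalone sketch of the conversion, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define RTC_CALIB_DEF    0x7FFF
	#define RTC_FR_MAX_TICKS 16

	int main(void)
	{
		uint32_t freq = 32768;                   /* typical RTC crystal */
		int64_t tick_mult = 1000000000LL / freq; /* ~30517 ppb per tick */
		int64_t step = tick_mult / RTC_FR_MAX_TICKS;
		long offset = 100000;                    /* requested correction, ppb */

		int64_t max_tick = offset / tick_mult;
		int64_t fract_offset = offset % tick_mult;
		unsigned int fract_tick = 0;

		if (fract_offset < 0) {        /* mirror the negative-remainder fixup */
			fract_offset += tick_mult;
			max_tick--;
		}
		/* smallest number of 1/16-tick steps covering the remainder */
		if (fract_offset > step)
			for (fract_tick = 1; fract_tick < RTC_FR_MAX_TICKS; fract_tick++)
				if (fract_offset <= (int64_t)fract_tick * step)
					break;

		printf("calib=%#llx fract=%u\n",
		       (unsigned long long)(max_tick + RTC_CALIB_DEF), fract_tick);
		return 0;
	}
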
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4df8bf6505fc..ea82821599f6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1725,7 +1725,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_put_device(device);
}
- /* check for for attention message */
+ /* check for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
if (!IS_ERR(device)) {
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e9edf3b6ed7c..94ee59864971 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -639,6 +639,7 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static int dasd_diag_pe_handler(struct dasd_device *device,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 836838f7d686..3cc93e2e4e15 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -6626,6 +6626,7 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static struct ccw_driver dasd_eckd_driver = {
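
blk_queue_dma_alignment() tells the block layer the minimum buffer alignment the driver's DMA path accepts; misaligned direct-I/O buffers are then bounced instead of reaching the driver. A hedged sketch of the queue-limits setup pattern the two dasd hunks extend:

	#include <linux/blkdev.h>

	/* Hypothetical setup: each segment is one naturally aligned page. */
	static void foo_setup_blk_queue(struct request_queue *q)
	{
		blk_queue_max_segment_size(q, PAGE_SIZE);
		blk_queue_segment_boundary(q, PAGE_SIZE - 1);
		/* Bounce user buffers that are not page aligned. */
		blk_queue_dma_alignment(q, PAGE_SIZE - 1);
	}
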
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4d8d1759775a..5187705bd0f3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -863,7 +863,7 @@ dcssblk_submit_bio(struct bio *bio)
unsigned long source_addr;
unsigned long bytes_done;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 57f41efb8043..7d1749b0d378 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -89,7 +89,7 @@ config HMC_DRV
Management Console (HMC) drive CD/DVD-ROM. It is available as a
module, called 'hmcdrv', and also as kernel built-in. There is one
optional parameter for this module: cachesize=N, which modifies the
- transfer cache size from it's default value 0.5MB to N bytes. If N
+ transfer cache size from its default value 0.5MB to N bytes. If N
is zero, then no caching is performed.
config SCLP_OFB
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 38cc1565d6ae..751945fb6793 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -548,7 +548,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
case 0x2e:
/*
* Not capable. This indicates either that the drive fails
- * reading the format id mark or that that format specified
+ * reading the format id mark or that format specified
* is not supported by the drive.
*/
dev_warn (&device->cdev->dev, "The tape unit cannot process "
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 66505d7166a6..1d40457c7b10 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -27,6 +27,7 @@
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/cpufeature.h>
#include <asm/uvdevice.h>
#include <asm/uv.h>
@@ -244,12 +245,10 @@ static void __exit uvio_dev_exit(void)
static int __init uvio_dev_init(void)
{
- if (!test_facility(158))
- return -ENXIO;
return misc_register(&uvio_dev_miscdev);
}
-module_init(uvio_dev_init);
+module_cpu_feature_match(S390_CPU_FEATURE_UV, uvio_dev_init);
module_exit(uvio_dev_exit);
MODULE_AUTHOR("IBM Corporation");
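
module_cpu_feature_match() replaces the hand-rolled facility test: it attaches a CPU-feature modalias to the module, so it is only auto-loaded, and its init function only run, on hardware advertising the feature. A minimal sketch mirroring the uvdevice conversion above (foo names are placeholders):

	#include <linux/cpufeature.h>
	#include <linux/miscdevice.h>
	#include <linux/module.h>

	static struct miscdevice foo_miscdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name  = "foo",
	};

	static int __init foo_init(void)
	{
		/* Reached only when the matched CPU feature is present. */
		return misc_register(&foo_miscdev);
	}

	static void __exit foo_exit(void)
	{
		misc_deregister(&foo_miscdev);
	}

	module_cpu_feature_match(S390_CPU_FEATURE_UV, foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");
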
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 516783ba950f..f6da215ccf9f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
+#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
@@ -50,36 +51,41 @@ static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
+static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
- * Copy memory from HSA to user memory (not reentrant):
+ * Copy memory from HSA to iterator (not reentrant):
*
- * @dest: User buffer where memory should be copied to
+ * @iter: Iterator where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ size_t bytes, copied, res = 0;
+ unsigned long offset;
if (!hsa_available)
- return -ENODATA;
+ return 0;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
+ break;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
- if (copy_to_user(dest, hsa_buf + offset, bytes))
- return -EFAULT;
- src += bytes;
- dest += bytes;
- count -= bytes;
+ copied = copy_to_iter(hsa_buf + offset, bytes, iter);
+ count -= copied;
+ src += copied;
+ res += copied;
+ if (copied < bytes)
+ break;
}
- return 0;
+ mutex_unlock(&hsa_buf_mutex);
+ return res;
}
/*
@@ -89,25 +95,16 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ struct iov_iter iter;
+ struct kvec kvec;
- if (!hsa_available)
- return -ENODATA;
-
- while (count) {
- if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
- TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
- }
- offset = src % PAGE_SIZE;
- bytes = min(PAGE_SIZE - offset, count);
- memcpy(dest, hsa_buf + offset, bytes);
- src += bytes;
- dest += bytes;
- count -= bytes;
- }
+ kvec.iov_base = dst;
+ kvec.iov_len = count;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ if (memcpy_hsa_iter(&iter, src, count) < count)
+ return -EIO;
return 0;
}
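
The zcore refactor funnels both user and kernel copies through one iterator-based routine; a kernel buffer is wrapped in a kvec so copy_to_iter() can target it exactly like user memory. A hedged sketch of that wrapping pattern for a hypothetical driver:

	#include <linux/uio.h>

	/* Copy @count bytes from @src into the kernel buffer @dst via an iov_iter. */
	static int foo_copy_to_kernel(void *dst, const void *src, size_t count)
	{
		struct iov_iter iter;
		struct kvec kvec = {
			.iov_base = dst,
			.iov_len  = count,
		};

		/* WRITE: the iterator is the destination of the copy. */
		iov_iter_kvec(&iter, WRITE, &kvec, 1, count);

		return copy_to_iter(src, count, &iter) == count ? 0 : -EIO;
	}
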
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 7a838e3d7c0f..420d89ba7f83 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -8,7 +8,6 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
#include "vfio_ccw_private.h"
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0c2be9421ab7..7b02e97f4b29 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -11,6 +11,7 @@
#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>
@@ -18,13 +19,11 @@
#include "vfio_ccw_cp.h"
#include "vfio_ccw_private.h"
-struct pfn_array {
- /* Starting guest physical I/O address. */
- unsigned long pa_iova;
- /* Array that stores PFNs of the pages need to pin. */
- unsigned long *pa_iova_pfn;
- /* Array that receives PFNs of the pages pinned. */
- unsigned long *pa_pfn;
+struct page_array {
+	/* Array that stores the pages that need to be pinned. */
+ dma_addr_t *pa_iova;
+ /* Array that receives the pinned pages. */
+ struct page **pa_page;
/* Number of pages pinned from @pa_iova. */
int pa_nr;
};
@@ -37,116 +36,158 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array *ch_pa;
+ struct page_array *ch_pa;
};
/*
- * pfn_array_alloc() - alloc memory for PFNs
- * @pa: pfn_array on which to perform the operation
+ * page_array_alloc() - alloc memory for page array
+ * @pa: page_array on which to perform the operation
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs.
+ * Attempt to allocate memory for page array.
*
- * Usage of pfn_array:
- * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * Usage of page_array:
+ * We expect (pa_nr == 0) and (pa_iova == NULL), any field in
* this structure will be filled in by this function.
*
* Returns:
- * 0 if PFNs are allocated
- * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * 0 if page array is allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
{
int i;
- if (pa->pa_nr || pa->pa_iova_pfn)
+ if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
- pa->pa_iova = iova;
-
pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (!pa->pa_nr)
return -EINVAL;
- pa->pa_iova_pfn = kcalloc(pa->pa_nr,
- sizeof(*pa->pa_iova_pfn) +
- sizeof(*pa->pa_pfn),
- GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_iova = kcalloc(pa->pa_nr,
+ sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
+ GFP_KERNEL);
+ if (unlikely(!pa->pa_iova)) {
pa->pa_nr = 0;
return -ENOMEM;
}
- pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+ pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
- pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- pa->pa_pfn[0] = -1ULL;
+ pa->pa_iova[0] = iova;
+ pa->pa_page[0] = NULL;
for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
- pa->pa_pfn[i] = -1ULL;
+ pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
+ pa->pa_page[i] = NULL;
}
return 0;
}
/*
- * pfn_array_pin() - Pin user pages in memory
- * @pa: pfn_array on which to perform the operation
+ * page_array_unpin() - Unpin user pages in memory
+ * @pa: page_array on which to perform the operation
+ * @vdev: the vfio device to perform the operation
+ * @pa_nr: number of user pages to unpin
+ *
+ * Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
+ * otherwise only clear pa->pa_nr
+ */
+static void page_array_unpin(struct page_array *pa,
+ struct vfio_device *vdev, int pa_nr)
+{
+ int unpinned = 0, npage = 1;
+
+ while (unpinned < pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[unpinned];
+ dma_addr_t *last = &first[npage];
+
+ if (unpinned + npage < pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ vfio_unpin_pages(vdev, *first, npage);
+ unpinned += npage;
+ npage = 1;
+ }
+
+ pa->pa_nr = 0;
+}
+
+/*
+ * page_array_pin() - Pin user pages in memory
+ * @pa: page_array on which to perform the operation
	 * @vdev: the vfio device to perform pin operations
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
*/
-static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
{
+ int pinned = 0, npage = 1;
int ret = 0;
- ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
- IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+ while (pinned < pa->pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[pinned];
+ dma_addr_t *last = &first[npage];
- if (ret < 0) {
- goto err_out;
- } else if (ret > 0 && ret != pa->pa_nr) {
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
- ret = -EINVAL;
- goto err_out;
+ if (pinned + npage < pa->pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ ret = vfio_pin_pages(vdev, *first, npage,
+ IOMMU_READ | IOMMU_WRITE,
+ &pa->pa_page[pinned]);
+ if (ret < 0) {
+ goto err_out;
+ } else if (ret > 0 && ret != npage) {
+ pinned += ret;
+ ret = -EINVAL;
+ goto err_out;
+ }
+ pinned += npage;
+ npage = 1;
}
return ret;
err_out:
- pa->pa_nr = 0;
-
+ page_array_unpin(pa, vdev, pinned);
return ret;
}
/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
{
- /* Only unpin if any pages were pinned to begin with */
- if (pa->pa_nr)
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
- pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
+ page_array_unpin(pa, vdev, pa->pa_nr);
+ kfree(pa->pa_iova);
}
-static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
+static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
{
- unsigned long iova_pfn = iova >> PAGE_SHIFT;
+ u64 iova_pfn_start = iova >> PAGE_SHIFT;
+ u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
+ u64 pfn;
int i;
- for (i = 0; i < pa->pa_nr; i++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ for (i = 0; i < pa->pa_nr; i++) {
+ pfn = pa->pa_iova[i] >> PAGE_SHIFT;
+ if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
return true;
+ }
return false;
}
-/* Create the list of IDAL words for a pfn_array. */
-static inline void pfn_array_idal_create_words(
- struct pfn_array *pa,
- unsigned long *idaws)
+/* Create the list of IDAL words for a page_array. */
+static inline void page_array_idal_create_words(struct page_array *pa,
+ unsigned long *idaws)
{
int i;
@@ -159,10 +200,10 @@ static inline void pfn_array_idal_create_words(
*/
for (i = 0; i < pa->pa_nr; i++)
- idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+ idaws[i] = page_to_phys(pa->pa_page[i]);
/* Adjust the first IDAW, since it may not start on a page boundary */
- idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+ idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
@@ -194,24 +235,24 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
unsigned long n)
{
- struct pfn_array pa = {0};
- u64 from;
+ struct page_array pa = {0};
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc(&pa, iova, n);
+ ret = page_array_alloc(&pa, iova, n);
if (ret < 0)
return ret;
- ret = pfn_array_pin(&pa, vdev);
+ ret = page_array_pin(&pa, vdev);
if (ret < 0) {
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return ret;
}
l = n;
for (i = 0; i < pa.pa_nr; i++) {
- from = pa.pa_pfn[i] << PAGE_SHIFT;
+ void *from = kmap_local_page(pa.pa_page[i]);
+
m = PAGE_SIZE;
if (i == 0) {
from += iova & (PAGE_SIZE - 1);
@@ -219,14 +260,15 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
}
m = min(l, m);
- memcpy(to + (n - l), (void *)from, m);
+ memcpy(to + (n - l), from, m);
+ kunmap_local(from);
l -= m;
if (l == 0)
break;
}
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return l;
}
@@ -329,7 +371,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pa = (struct pfn_array *)data;
+ chain->ch_pa = (struct page_array *)data;
chain->ch_len = len;
@@ -513,7 +555,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccw1 *ccw;
- struct pfn_array *pa;
+ struct page_array *pa;
u64 iova;
unsigned long *idaws;
int ret;
@@ -547,13 +589,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
}
/*
- * Allocate an array of pfn's for pages to pin/translate.
+ * Allocate an array of pages to pin/translate.
* The number of pages is actually the count of the idaws
	 * required for the data transfer, since we only support
* 4K IDAWs today.
*/
pa = chain->ch_pa + idx;
- ret = pfn_array_alloc(pa, iova, bytes);
+ ret = page_array_alloc(pa, iova, bytes);
if (ret < 0)
goto out_free_idaws;
@@ -564,21 +606,21 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
goto out_unpin;
/*
- * Copy guest IDAWs into pfn_array, in case the memory they
+ * Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
for (i = 0; i < idaw_nr; i++)
- pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ pa->pa_iova[i] = idaws[i];
} else {
/*
- * No action is required here; the iova addresses in pfn_array
- * were initialized sequentially in pfn_array_alloc() beginning
+ * No action is required here; the iova addresses in page_array
+ * were initialized sequentially in page_array_alloc() beginning
* with the contents of ccw->cda.
*/
}
if (ccw_does_data_transfer(ccw)) {
- ret = pfn_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev);
if (ret < 0)
goto out_unpin;
} else {
@@ -588,13 +630,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
- /* Populate the IDAL with pinned/translated addresses from pfn */
- pfn_array_idal_create_words(pa, idaws);
+ /* Populate the IDAL with pinned/translated addresses from page */
+ page_array_idal_create_words(pa, idaws);
return 0;
out_unpin:
- pfn_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev);
out_free_idaws:
kfree(idaws);
out_init:
@@ -700,7 +742,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- pfn_array_unpin_free(chain->ch_pa + i, vdev);
+ page_array_unpin_free(chain->ch_pa + i, vdev);
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
@@ -862,11 +904,12 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* cp_iova_pinned() - check if an iova is pinned for a ccw chain.
* @cp: channel_program on which to perform the operation
* @iova: the iova to check
+ * @length: the length to check from @iova
*
* If the @iova is currently pinned for the ccw chain, return true;
* else return false.
*/
-bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
{
struct ccwchain *chain;
int i;
@@ -876,7 +919,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
+ if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
return true;
}
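
The new pin/unpin loops batch runs of physically contiguous IOVAs so one vfio_pin_pages() call covers a whole run instead of a single page. The run-detection logic can be exercised stand-alone (hypothetical names; the "pin" is a printf):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096ULL

	static void pin_contiguous(const uint64_t *iova, int nr)
	{
		int done = 0, npage = 1;

		while (done < nr) {
			const uint64_t *first = &iova[done];
			const uint64_t *last = &first[npage];

			/* Extend the run while the next IOVA is exactly one page on. */
			if (done + npage < nr && *first + npage * PAGE_SIZE == *last) {
				npage++;
				continue;
			}
			printf("pin %d page(s) at %#llx\n", npage,
			       (unsigned long long)*first);
			done += npage;
			npage = 1;
		}
	}

	int main(void)
	{
		uint64_t iova[] = { 0x1000, 0x2000, 0x3000, 0x9000, 0xa000 };

		pin_contiguous(iova, 5);	/* pins a run of 3, then a run of 2 */
		return 0;
	}
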
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index e4c436199b4c..54d26e242533 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -41,11 +41,11 @@ struct channel_program {
struct ccw1 *guest_cp;
};
-extern int cp_init(struct channel_program *cp, union orb *orb);
-extern void cp_free(struct channel_program *cp);
-extern int cp_prefetch(struct channel_program *cp);
-extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
-extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
-extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+int cp_init(struct channel_program *cp, union orb *orb);
+void cp_free(struct channel_program *cp);
+int cp_prefetch(struct channel_program *cp);
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
#endif
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ee182cfb467d..86d9e428357b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <asm/isc.h>
@@ -42,13 +41,6 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
DECLARE_COMPLETION_ONSTACK(completion);
int iretry, ret = 0;
- spin_lock_irq(sch->lock);
- if (!sch->schib.pmcw.ena)
- goto out_unlock;
- ret = cio_disable_subchannel(sch);
- if (ret != -EBUSY)
- goto out_unlock;
-
iretry = 255;
do {
@@ -75,9 +67,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
-out_unlock:
- private->state = VFIO_CCW_STATE_NOT_OPER;
- spin_unlock_irq(sch->lock);
+
return ret;
}
@@ -107,9 +97,10 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
/*
* Reset to IDLE only if processing of a channel program
* has finished. Do not overwrite a possible processing
- * state if the final interrupt was for HSCH or CSCH.
+ * state if the interrupt was unsolicited, or if the final
+ * interrupt was for HSCH or CSCH.
*/
- if (private->mdev && cp_is_finished)
+ if (cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
@@ -147,7 +138,7 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
private->sch = sch;
mutex_init(&private->io_mutex);
- private->state = VFIO_CCW_STATE_NOT_OPER;
+ private->state = VFIO_CCW_STATE_STANDBY;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
@@ -231,26 +222,15 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
- spin_lock_irq(sch->lock);
- sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- spin_unlock_irq(sch->lock);
+ ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
if (ret)
goto out_free;
- private->state = VFIO_CCW_STATE_STANDBY;
-
- ret = vfio_ccw_mdev_reg(sch);
- if (ret)
- goto out_disable;
-
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
return 0;
-out_disable:
- cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
vfio_ccw_free_private(private);
@@ -261,8 +241,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- vfio_ccw_sch_quiesce(sch);
- vfio_ccw_mdev_unreg(sch);
+ mdev_unregister_device(&sch->dev);
dev_set_drvdata(&sch->dev, NULL);
@@ -275,7 +254,10 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
- vfio_ccw_sch_quiesce(sch);
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
/**
@@ -301,19 +283,11 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
if (work_pending(&sch->todo_work))
goto out_unlock;
- if (cio_update_schib(sch)) {
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
- rc = 0;
- goto out_unlock;
- }
-
- private = dev_get_drvdata(&sch->dev);
- if (private->state == VFIO_CCW_STATE_NOT_OPER) {
- private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
- VFIO_CCW_STATE_STANDBY;
- }
rc = 0;
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
@@ -358,8 +332,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
return 0;
trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
- VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
- mdev_uuid(private->mdev), sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
+ sch->schid.cssid,
sch->schid.ssid, sch->schid.sch_no,
mask, event);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 8483a266051c..a59c758869f8 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -10,7 +10,8 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
+
+#include <asm/isc.h>
#include "ioasm.h"
#include "vfio_ccw_private.h"
@@ -161,8 +162,12 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
struct subchannel *sch = private->sch;
- VFIO_CCW_TRACE_EVENT(2, "notoper");
- VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
+ sch->schid.cssid,
+ sch->schid.ssid,
+ sch->schid.sch_no,
+ event,
+ private->state);
/*
* TODO:
@@ -170,6 +175,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
*/
css_sched_sch_todo(sch, SCH_TODO_UNREG);
private->state = VFIO_CCW_STATE_NOT_OPER;
+
+ /* This is usually handled during CLOSE event */
+ cp_free(&private->cp);
}
/*
@@ -242,7 +250,6 @@ static void fsm_io_request(struct vfio_ccw_private *private,
union orb *orb;
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = private->io_region;
- struct mdev_device *mdev = private->mdev;
char *errstr = "request";
struct subchannel_id schid = get_schid(private);
@@ -256,8 +263,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): transport mode\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: transport mode\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
@@ -265,8 +272,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_init=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_init=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp init";
@@ -276,8 +283,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_prefetch=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp prefetch";
@@ -289,8 +296,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: fsm_io_helper=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp fsm_io_helper";
@@ -300,16 +307,16 @@ static void fsm_io_request(struct vfio_ccw_private *private,
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): halt on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: halt on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): clear on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: clear on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
@@ -366,6 +373,54 @@ static void fsm_irq(struct vfio_ccw_private *private,
complete(private->completion);
}
+static void fsm_open(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ sch->isc = VFIO_CCW_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_IDLE;
+ spin_unlock_irq(sch->lock);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
+static void fsm_close(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+
+ if (!sch->schib.pmcw.ena)
+ goto err_unlock;
+
+ ret = cio_disable_subchannel(sch);
+ if (ret == -EBUSY)
+ ret = vfio_ccw_sch_quiesce(sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_STANDBY;
+ spin_unlock_irq(sch->lock);
+ cp_free(&private->cp);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
/*
* Device statemachine
*/
@@ -375,29 +430,39 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_nop,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_nop,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
- [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_open,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
};
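
The jumptable above is a plain two-dimensional function-pointer dispatch: every (state, event) pair maps to a handler, so illegal combinations are explicit entries instead of switch fall-throughs. A standalone sketch of the same pattern (toy states and events, not the vfio-ccw ones):

	#include <stdio.h>

	enum state { ST_CLOSED, ST_OPEN, NR_STATES };
	enum event { EV_OPEN, EV_CLOSE, NR_EVENTS };

	typedef void fsm_func_t(enum state *st, enum event ev);

	static void fsm_nop(enum state *st, enum event ev)   { (void)st; (void)ev; }
	static void fsm_open(enum state *st, enum event ev)  { (void)ev; *st = ST_OPEN; }
	static void fsm_close(enum state *st, enum event ev) { (void)ev; *st = ST_CLOSED; }

	static fsm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
		[ST_CLOSED] = {
			[EV_OPEN]  = fsm_open,
			[EV_CLOSE] = fsm_nop,	/* already closed: explicit no-op */
		},
		[ST_OPEN] = {
			[EV_OPEN]  = fsm_nop,
			[EV_CLOSE] = fsm_close,
		},
	};

	static void fsm_event(enum state *st, enum event ev)
	{
		jumptable[*st][ev](st, ev);	/* one indexed call, no switch */
	}

	int main(void)
	{
		enum state st = ST_CLOSED;

		fsm_event(&st, EV_OPEN);
		fsm_event(&st, EV_CLOSE);
		printf("final state: %d\n", st);
		return 0;
	}
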
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index b49e2e9db2dc..4a806a2273b5 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -21,54 +21,28 @@ static const struct vfio_device_ops vfio_ccw_dev_ops;
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
- struct subchannel *sch;
- int ret;
-
- sch = private->sch;
/*
- * TODO:
- * In the cureent stage, some things like "no I/O running" and "no
- * interrupt pending" are clear, but we are not sure what other state
- * we need to care about.
- * There are still a lot more instructions need to be handled. We
- * should come back here later.
+ * If the FSM state is seen as Not Operational after closing
+ * and re-opening the mdev, return an error.
*/
- ret = vfio_ccw_sch_quiesce(sch);
- if (ret)
- return ret;
-
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- if (!ret)
- private->state = VFIO_CCW_STATE_IDLE;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
- return ret;
+ return 0;
}
-static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
+static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
struct vfio_ccw_private *private =
- container_of(nb, struct vfio_ccw_private, nb);
-
- /*
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
-
- if (!cp_iova_pinned(&private->cp, unmap->iova))
- return NOTIFY_OK;
+ container_of(vdev, struct vfio_ccw_private, vdev);
- if (vfio_ccw_mdev_reset(private))
- return NOTIFY_BAD;
+ /* Drivers MUST unpin pages in response to an invalidation. */
+ if (!cp_iova_pinned(&private->cp, iova, length))
+ return;
- cp_free(&private->cp);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
+ vfio_ccw_mdev_reset(private);
}
static ssize_t name_show(struct mdev_type *mtype,
@@ -128,11 +102,8 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
vfio_init_group_dev(&private->vdev, &mdev->dev,
&vfio_ccw_dev_ops);
- private->mdev = mdev;
- private->state = VFIO_CCW_STATE_IDLE;
-
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
@@ -145,8 +116,6 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
err_atomic:
vfio_uninit_group_dev(&private->vdev);
atomic_inc(&private->avail);
- private->mdev = NULL;
- private->state = VFIO_CCW_STATE_IDLE;
return ret;
}
@@ -154,23 +123,14 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
vfio_unregister_group_dev(&private->vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_sch_quiesce(private->sch))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
vfio_uninit_group_dev(&private->vdev);
- cp_free(&private->cp);
- private->mdev = NULL;
atomic_inc(&private->avail);
}
@@ -178,19 +138,15 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
int ret;
- private->nb.notifier_call = vfio_ccw_mdev_notifier;
-
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
- if (ret)
- return ret;
+ /* Device cannot simply be opened again from this state */
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
- goto out_unregister;
+ return ret;
ret = vfio_ccw_register_schib_dev_regions(private);
if (ret)
@@ -200,11 +156,16 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
if (ret)
goto out_unregister;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+ ret = -EINVAL;
+ goto out_unregister;
+ }
+
return ret;
out_unregister:
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
return ret;
}
@@ -213,16 +174,8 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(private))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
- cp_free(&private->cp);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -645,6 +598,7 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
.request = vfio_ccw_mdev_request,
+ .dma_unmap = vfio_ccw_dma_unmap,
};
struct mdev_driver vfio_ccw_mdev_driver = {
@@ -657,13 +611,3 @@ struct mdev_driver vfio_ccw_mdev_driver = {
.remove = vfio_ccw_mdev_remove,
.supported_type_groups = mdev_type_groups,
};
-
-int vfio_ccw_mdev_reg(struct subchannel *sch)
-{
- return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
-}
-
-void vfio_ccw_mdev_unreg(struct subchannel *sch)
-{
- mdev_unregister_device(&sch->dev);
-}
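
With the notifier gone, the unpin-on-invalidation contract moves into vfio_device_ops itself: the vfio core calls .dma_unmap() directly for each invalidated range, and the driver resets only if any of its pinned pages fall inside it. A hedged sketch of the callback shape (the foo helpers are hypothetical):

	#include <linux/vfio.h>

	static void foo_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
	{
		/* Drivers MUST unpin pages in response to an invalidation. */
		if (!foo_range_is_pinned(vdev, iova, length))
			return;

		foo_reset_and_unpin(vdev);	/* hypothetical helpers */
	}

	static const struct vfio_device_ops foo_dev_ops = {
		/* ... read/write/ioctl ... */
		.dma_unmap = foo_dma_unmap,
	};
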
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 7272eb788612..cd24b7fada91 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -73,8 +73,6 @@ struct vfio_ccw_crw {
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
* @avail: available for creating a mediated device
- * @mdev: pointer to the mediated device
- * @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
@@ -97,8 +95,6 @@ struct vfio_ccw_private {
int state;
struct completion *completion;
atomic_t avail;
- struct mdev_device *mdev;
- struct notifier_block nb;
struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
@@ -119,10 +115,7 @@ struct vfio_ccw_private {
struct work_struct crw_work;
} __aligned(8);
-extern int vfio_ccw_mdev_reg(struct subchannel *sch);
-extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
-
-extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+int vfio_ccw_sch_quiesce(struct subchannel *sch);
extern struct mdev_driver vfio_ccw_mdev_driver;
@@ -147,6 +140,8 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
VFIO_CCW_EVENT_ASYNC_REQ,
+ VFIO_CCW_EVENT_OPEN,
+ VFIO_CCW_EVENT_CLOSE,
/* last element! */
NR_VFIO_CCW_EVENTS
};
@@ -158,7 +153,7 @@ typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
- int event)
+ enum vfio_ccw_event event)
{
trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
vfio_ccw_jumptable[private->state][event](private, event);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 95cfdeab5baf..59ac98f2bd27 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -838,6 +838,17 @@ static void ap_bus_revise_bindings(void)
bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
+/**
+ * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
+ * default host driver or not.
+ * @card: the APID of the adapter card to check
+ * @queue: the APQI of the queue to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether the AP adapter is reserved for the host (1)
+ * or not (0).
+ */
int ap_owned_by_def_drv(int card, int queue)
{
int rc = 0;
@@ -845,25 +856,31 @@ int ap_owned_by_def_drv(int card, int queue)
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
- mutex_lock(&ap_perms_mutex);
-
if (test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
+/**
+ * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in
+ * a set is reserved for the host drivers
+ * or not.
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether each APQN is reserved for the host (1) or
+ * not (0)
+ */
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
unsigned long *aqm)
{
int card, queue, rc = 0;
- mutex_lock(&ap_perms_mutex);
-
for (card = 0; !rc && card < AP_DEVICES; card++)
if (test_bit_inv(card, apm) &&
test_bit_inv(card, ap_perms.apm))
@@ -872,8 +889,6 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
@@ -2071,6 +2086,9 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
+ if (!ap_qci_info) /* QCI not supported */
+ return false;
+
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
ap_fetch_qci_info(ap_qci_info);
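
After the hunks above, ap_owned_by_def_drv() and ap_apqn_in_matrix_owned_by_def_drv() no longer lock internally; the kernel-doc instead obliges the caller to hold ap_perms_mutex across the call. A hedged sketch of the resulting call pattern, assuming ap_bus.h (which declares ap_perms_mutex) is in the include path:

	#include <linux/mutex.h>
	#include "ap_bus.h"

	/* Hypothetical caller honoring the new locking contract. */
	static bool foo_apqn_is_host_owned(int card, int queue)
	{
		int rc;

		mutex_lock(&ap_perms_mutex);
		rc = ap_owned_by_def_drv(card, queue);	/* lock held by caller */
		mutex_unlock(&ap_perms_mutex);

		return rc == 1;
	}
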
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0c40af157df2..0f17933954fb 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -148,12 +148,16 @@ struct ap_driver {
/*
* Called at the start of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_config_changed)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
/*
* Called at the end of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_scan_complete)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index c48b0db824e3..a32457b4cbb8 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -34,7 +34,7 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
- status = ap_aqic(aq->qid, qirqctrl, ind);
+ status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 7329caa7d467..5a05d1cdfec2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -2115,5 +2115,5 @@ static void __exit pkey_exit(void)
pkey_debug_exit();
}
-module_cpu_feature_match(MSA, pkey_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 4ac9c6521ec1..f43cfeabd2cc 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -18,9 +18,6 @@
#define VFIO_AP_ROOT_NAME "vfio_ap"
#define VFIO_AP_DEV_NAME "matrix"
-#define AP_QUEUE_ASSIGNED "assigned"
-#define AP_QUEUE_UNASSIGNED "unassigned"
-#define AP_QUEUE_IN_USE "in use"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
@@ -46,120 +43,12 @@ static struct ap_device_id ap_queue_ids[] = {
{ /* end of sibling */ },
};
-static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
-{
- struct ap_matrix_mdev *matrix_mdev;
- unsigned long apid = AP_QID_CARD(q->apqn);
- unsigned long apqi = AP_QID_QUEUE(q->apqn);
-
- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
- if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
- test_bit_inv(apqi, matrix_mdev->matrix.aqm))
- return matrix_mdev;
- }
-
- return NULL;
-}
-
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t nchars = 0;
- struct vfio_ap_queue *q;
- struct ap_matrix_mdev *matrix_mdev;
- struct ap_device *apdev = to_ap_dev(dev);
-
- mutex_lock(&matrix_dev->lock);
- q = dev_get_drvdata(&apdev->device);
- matrix_mdev = vfio_ap_mdev_for_queue(q);
-
- if (matrix_mdev) {
- if (matrix_mdev->kvm)
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_IN_USE);
- else
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_ASSIGNED);
- } else {
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_UNASSIGNED);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return nchars;
-}
-
-static DEVICE_ATTR_RO(status);
-
-static struct attribute *vfio_queue_attrs[] = {
- &dev_attr_status.attr,
- NULL,
-};
-
-static const struct attribute_group vfio_queue_attr_group = {
- .attrs = vfio_queue_attrs,
-};
-
-/**
- * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it
- * with the device as driver_data.
- *
- * @apdev: the AP device being probed
- *
- * Return: returns 0 if the probe succeeded; otherwise, returns an error if
- * storage could not be allocated for a vfio_ap_queue object or the
- * sysfs 'status' attribute could not be created for the queue device.
- */
-static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
-{
- int ret;
- struct vfio_ap_queue *q;
-
- q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
- mutex_lock(&matrix_dev->lock);
- dev_set_drvdata(&apdev->device, q);
- q->apqn = to_ap_queue(&apdev->device)->qid;
- q->saved_isc = VFIO_AP_ISC_INVALID;
-
- ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
- if (ret) {
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return ret;
-}
-
-/**
- * vfio_ap_queue_dev_remove: Free the associated vfio_ap_queue structure.
- *
- * @apdev: the AP device being removed
- *
- * Takes the matrix lock to avoid actions on this device while doing the remove.
- */
-static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
-{
- struct vfio_ap_queue *q;
-
- mutex_lock(&matrix_dev->lock);
- sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
- q = dev_get_drvdata(&apdev->device);
- vfio_ap_mdev_reset_queue(q, 1);
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- mutex_unlock(&matrix_dev->lock);
-}
-
static struct ap_driver vfio_ap_drv = {
- .probe = vfio_ap_queue_dev_probe,
- .remove = vfio_ap_queue_dev_remove,
+ .probe = vfio_ap_mdev_probe_queue,
+ .remove = vfio_ap_mdev_remove_queue,
+ .in_use = vfio_ap_mdev_resource_in_use,
+ .on_config_changed = vfio_ap_on_cfg_changed,
+ .on_scan_complete = vfio_ap_on_scan_complete,
.ids = ap_queue_ids,
};
@@ -212,8 +101,9 @@ static int vfio_ap_matrix_dev_create(void)
goto matrix_alloc_err;
}
- mutex_init(&matrix_dev->lock);
+ mutex_init(&matrix_dev->mdevs_lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
+ mutex_init(&matrix_dev->guests_lock);
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a7d2a95796d3..6c8c41fac4e1 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -26,44 +26,193 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
+#define AP_QUEUE_ASSIGNED "assigned"
+#define AP_QUEUE_UNASSIGNED "unassigned"
+#define AP_QUEUE_IN_USE "in use"
+
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry);
+
+/**
+ * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be taken.
+ */
+static inline void get_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (kvm)
+ mutex_lock(&kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_kvm: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be released.
+ */
+static inline void release_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (kvm)
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+/**
+ * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be taken.
+ */
+static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_mdev: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. matrix_mdev->kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be released.
+ */
+static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
-static int match_apqn(struct device *dev, const void *data)
+/**
+ * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
+ * acquire the locks required to update the APCB of
+ * the KVM guest to which the mdev is attached.
+ *
+ * @apqn: the APQN of a queue device.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
+ * will not be taken.
+ *
+ * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
+ * is not assigned to an ap_matrix_mdev.
+ */
+static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
- struct vfio_ap_queue *q = dev_get_drvdata(dev);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
+ test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
+ if (matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return matrix_mdev;
+ }
+ }
- return (q->apqn == *(int *)(data)) ? 1 : 0;
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return NULL;
}
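+
+/*
+ * Hypothetical caller sketch: the helper above returns with all of the
+ * update locks held (or with only guests_lock and mdevs_lock held when no
+ * mdev matched), so either way the caller releases via the NULL-tolerant
+ * release_update_locks_for_mdev():
+ */
+static inline void example_update_for_apqn(int apqn)
+{
+	struct ap_matrix_mdev *matrix_mdev = get_update_locks_by_apqn(apqn);
+
+	if (matrix_mdev) {
+		/* ...update the guest's APCB for this APQN... */
+	}
+
+	release_update_locks_for_mdev(matrix_mdev);
+}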
/**
- * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
- * @matrix_mdev: the associated mediated matrix
- * @apqn: The queue APQN
+ * get_update_locks_for_queue: get the locks required to update the APCB of the
+ * KVM guest to which the matrix mdev linked to a
+ * vfio_ap_queue object is attached.
+ *
+ * @q: a pointer to a vfio_ap_queue object.
*
- * Retrieve a queue with a specific APQN from the list of the
- * devices of the vfio_ap_drv.
- * Verify that the APID and the APQI are set in the matrix.
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a
+ * KVM guest's APCB.
+ * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev
+ *
+ * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock
+ * will not be taken.
+ */
+static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (q->matrix_mdev && q->matrix_mdev->kvm)
+ mutex_lock(&q->matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
+ * hash table of queues assigned to a matrix mdev
+ * @matrix_mdev: the matrix mdev
+ * @apqn: The APQN of a queue device
*
- * Return: the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the vfio_ap_queue struct representing the queue or
+ * NULL if the queue is not assigned to @matrix_mdev
*/
-static struct vfio_ap_queue *vfio_ap_get_queue(
+static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
struct ap_matrix_mdev *matrix_mdev,
int apqn)
{
struct vfio_ap_queue *q;
- if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
- return NULL;
- if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
- return NULL;
-
- q = vfio_ap_find_queue(apqn);
- if (q)
- q->matrix_mdev = matrix_mdev;
+ hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
+ apqn) {
+ if (q && q->apqn == apqn)
+ return q;
+ }
- return q;
+ return NULL;
}
/**
@@ -112,7 +261,7 @@ static void vfio_ap_wait_for_irqclear(int apqn)
*
* Unregisters the ISC in the GIB when the saved ISC is not invalid.
* Unpins the guest's page holding the NIB when it exists.
- * Resets the saved_pfn and saved_isc to invalid values.
+ * Resets the saved_iova and saved_isc to invalid values.
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
@@ -123,9 +272,9 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
q->saved_isc = VFIO_AP_ISC_INVALID;
}
- if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
- vfio_unpin_pages(&q->matrix_mdev->vdev, &q->saved_pfn, 1);
- q->saved_pfn = 0;
+ if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
+ vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
+ q->saved_iova = 0;
}
}
@@ -154,7 +303,7 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
int retries = 5;
do {
- status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ status = ap_aqic(q->apqn, aqic_gisa, 0);
switch (status.response_code) {
case AP_RESPONSE_OTHERWISE_CHANGED:
case AP_RESPONSE_NORMAL:
@@ -180,7 +329,6 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
status.response_code);
end_free:
vfio_ap_free_aqic_resources(q);
- q->matrix_mdev = NULL;
return status;
}
@@ -189,27 +337,19 @@ end_free:
*
* @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
* @nib: the location for storing the nib address.
- * @g_pfn: the location for storing the page frame number of the page containing
- * the nib.
*
* When the PQAP(AQIC) instruction is executed, general register 2 contains the
* address of the notification indicator byte (nib) used for IRQ notification.
- * This function parses the nib from gr2 and calculates the page frame
- * number for the guest of the page containing the nib. The values are
- * stored in @nib and @g_pfn respectively.
- *
- * The g_pfn of the nib is then validated to ensure the nib address is valid.
+ * This function parses and validates the nib from gr2.
*
* Return: returns zero if the nib address is valid; otherwise, returns
* -EINVAL.
*/
-static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, unsigned long *nib,
- unsigned long *g_pfn)
+static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
*nib = vcpu->run->s.regs.gprs[2];
- *g_pfn = *nib >> PAGE_SHIFT;
- if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *g_pfn)))
+ if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
return -EINVAL;
return 0;
@@ -239,33 +379,34 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
int isc,
struct kvm_vcpu *vcpu)
{
- unsigned long nib;
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status = {};
struct kvm_s390_gisa *gisa;
+ struct page *h_page;
int nisc;
struct kvm *kvm;
- unsigned long h_nib, g_pfn, h_pfn;
+ phys_addr_t h_nib;
+ dma_addr_t nib;
int ret;
/* Verify that the notification indicator byte address is valid */
- if (vfio_ap_validate_nib(vcpu, &nib, &g_pfn)) {
- VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, nib, g_pfn, q->apqn);
+ if (vfio_ap_validate_nib(vcpu, &nib)) {
+ VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
+ __func__, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
- ret = vfio_pin_pages(&q->matrix_mdev->vdev, &g_pfn, 1,
- IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_page);
switch (ret) {
case 1:
break;
default:
VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
- "nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, ret, nib, g_pfn, q->apqn);
+ "nib=%pad, apqn=%#04x\n",
+ __func__, ret, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
@@ -274,7 +415,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
kvm = q->matrix_mdev->kvm;
gisa = kvm->arch.gisa_int.origin;
- h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
aqic_gisa.gisc = isc;
nisc = kvm_s390_gisc_register(kvm, isc);
@@ -290,17 +431,17 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
aqic_gisa.ir = 1;
aqic_gisa.gisa = (uint64_t)gisa >> 4;
- status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ status = ap_aqic(q->apqn, aqic_gisa, h_nib);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
/* See if we did clear older IRQ configuration */
vfio_ap_free_aqic_resources(q);
- q->saved_pfn = g_pfn;
+ q->saved_iova = nib;
q->saved_isc = isc;
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
/* We could not modify IRQ settings: clear new configuration */
- vfio_unpin_pages(&q->matrix_mdev->vdev, &g_pfn, 1);
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
default:
@@ -406,10 +547,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
if (!vcpu->kvm->arch.crypto.pqap_hook) {
VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
__func__, apqn);
+
goto out_unlock;
}
@@ -425,7 +568,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
goto out_unlock;
}
- q = vfio_ap_get_queue(matrix_mdev, apqn);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
if (!q) {
VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
__func__, AP_QID_CARD(apqn),
@@ -444,7 +587,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
out_unlock:
memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
vcpu->run->s.regs.gprs[1] >>= 32;
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
}
@@ -456,6 +599,91 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
matrix->adm_max = info->apxa ? info->Nd : 15;
}
+static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->kvm)
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm,
+ matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.adm);
+}
+
+static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+{
+ DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);
+
+ bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
+ bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
+ (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);
+
+ return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
+ AP_DOMAINS);
+}
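+
+/*
+ * The snapshot/recompute/compare idiom above generalizes to any of the
+ * masks; a hypothetical helper for a single mask (assuming nbits does not
+ * exceed AP_DOMAINS) might look like:
+ */
+static inline bool example_recompute_mask(unsigned long *shadow,
+					  const unsigned long *assigned,
+					  const unsigned long *host,
+					  unsigned int nbits)
+{
+	DECLARE_BITMAP(prev, AP_DOMAINS);
+
+	bitmap_copy(prev, shadow, nbits);
+	bitmap_and(shadow, assigned, host, nbits);
+
+	/* report whether recomputing actually changed the shadow mask */
+	return !bitmap_equal(prev, shadow, nbits);
+}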
+
+/*
+ * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
+ * to ensure no queue devices are passed through to
+ * the guest that are not bound to the vfio_ap
+ * device driver.
+ *
+ * @apm: a bitmap specifying the APIDs of the adapters to be examined.
+ * @aqm: a bitmap specifying the APQIs of the domains to be examined.
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
+ *
+ * Note: If an APQN references a queue device that is not bound to the vfio_ap
+ *	 driver, its APID will be filtered from the guest's APCB; the matrix
+ *	 structure precludes filtering an individual APQN, so the APID of the
+ *	 adapter is filtered instead.
+ *
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ struct ap_matrix_mdev *matrix_mdev)
+{
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+ DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
+ struct vfio_ap_queue *q;
+
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+ * from the matrix mdev, but only those that are assigned to the host's
+ * AP configuration.
+ */
+ bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
+ (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+ * AP configuration. The AP architecture won't
+ * allow filtering of a single APQN, so let's filter
+ * the APID since an adapter represents a physical
+ * hardware device.
+ */
+ apqn = AP_MKQID(apid, apqi);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q || q->reset_rc) {
+ clear_bit_inv(apid,
+ matrix_mdev->shadow_apcb.apm);
+ break;
+ }
+ }
+ }
+
+ return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
+ AP_DEVICES) ||
+ !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
+ AP_DOMAINS);
+}
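+
+/*
+ * Hypothetical caller sketch: filtering and the guest APCB update are
+ * combined so the APCB is only pushed to the guest when filtering actually
+ * changed the shadow APCB:
+ */
+static inline void example_refresh_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
+{
+	if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+				       matrix_mdev->matrix.aqm, matrix_mdev))
+		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+}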
+
static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev;
@@ -475,20 +703,19 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
matrix_mdev->pqap_hook = handle_pqap;
- mutex_lock(&matrix_dev->lock);
- list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
- mutex_unlock(&matrix_dev->lock);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+ hash_init(matrix_mdev->qtable.queues);
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
goto err_list;
dev_set_drvdata(&mdev->dev, matrix_mdev);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
err_list:
- mutex_lock(&matrix_dev->lock);
- list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
err_dec_available:
@@ -496,16 +723,62 @@ err_dec_available:
return ret;
}
+static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
+ struct vfio_ap_queue *q)
+{
+ if (q) {
+ q->matrix_mdev = matrix_mdev;
+ hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
+ }
+}
+
+static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
+{
+ struct vfio_ap_queue *q;
+
+ q = vfio_ap_find_queue(apqn);
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+}
+
+static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
+{
+ hash_del(&q->mdev_qnode);
+}
+
+static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
+{
+ q->matrix_mdev = NULL;
+}
+
+static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct vfio_ap_queue *q;
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ AP_DOMAINS) {
+ q = vfio_ap_mdev_get_queue(matrix_mdev,
+ AP_MKQID(apid, apqi));
+ if (q)
+ q->matrix_mdev = NULL;
+ }
+ }
+}
+
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(&matrix_mdev->vdev);
- mutex_lock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
atomic_inc(&matrix_dev->available_instances);
@@ -554,141 +827,48 @@ static struct attribute_group *vfio_ap_mdev_type_groups[] = {
NULL,
};
-struct vfio_ap_queue_reserved {
- unsigned long *apid;
- unsigned long *apqi;
- bool reserved;
-};
-
-/**
- * vfio_ap_has_queue - determines if the AP queue containing the target in @data
- *
- * @dev: an AP queue device
- * @data: a struct vfio_ap_queue_reserved reference
- *
- * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
- * apid or apqi specified in @data:
- *
- * - If @data contains both an apid and apqi value, then @data will be flagged
- * as reserved if the APID and APQI fields for the AP queue device matches
- *
- * - If @data contains only an apid value, @data will be flagged as
- * reserved if the APID field in the AP queue device matches
- *
- * - If @data contains only an apqi value, @data will be flagged as
- * reserved if the APQI field in the AP queue device matches
- *
- * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if
- * @data does not contain either an apid or apqi.
- */
-static int vfio_ap_has_queue(struct device *dev, void *data)
-{
- struct vfio_ap_queue_reserved *qres = data;
- struct ap_queue *ap_queue = to_ap_queue(dev);
- ap_qid_t qid;
- unsigned long id;
-
- if (qres->apid && qres->apqi) {
- qid = AP_MKQID(*qres->apid, *qres->apqi);
- if (qid == ap_queue->qid)
- qres->reserved = true;
- } else if (qres->apid && !qres->apqi) {
- id = AP_QID_CARD(ap_queue->qid);
- if (id == *qres->apid)
- qres->reserved = true;
- } else if (!qres->apid && qres->apqi) {
- id = AP_QID_QUEUE(ap_queue->qid);
- if (id == *qres->apqi)
- qres->reserved = true;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
- * @apid or @aqpi is reserved
- *
- * @apid: an AP adapter ID
- * @apqi: an AP queue index
- *
- * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
- * driver according to the following rules:
- *
- * - If both @apid and @apqi are not NULL, then there must be an AP queue
- * device bound to the vfio_ap driver with the APQN identified by @apid and
- * @apqi
- *
- * - If only @apid is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apid
- *
- * - If only @apqi is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apqi
- *
- * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
- */
-static int vfio_ap_verify_queue_reserved(unsigned long *apid,
- unsigned long *apqi)
-{
- int ret;
- struct vfio_ap_queue_reserved qres;
-
- qres.apid = apid;
- qres.apqi = apqi;
- qres.reserved = false;
-
- ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &qres, vfio_ap_has_queue);
- if (ret)
- return ret;
-
- if (qres.reserved)
- return 0;
-
- return -EADDRNOTAVAIL;
-}
+#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
+ "already assigned to %s"
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apid)
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apm,
+ unsigned long *aqm)
{
- int ret;
- unsigned long apqi;
- unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(&apid, NULL);
-
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
+ unsigned long apid, apqi;
+ const struct device *dev = mdev_dev(matrix_mdev->mdev);
+ const char *mdev_name = dev_name(dev);
- return 0;
+ for_each_set_bit_inv(apid, apm, AP_DEVICES)
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
+ dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}
/**
- * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured
+ * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
- * @matrix_mdev: the mediated matrix device
+ * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
+ * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
- * Verifies that the APQNs derived from the cross product of the AP adapter IDs
- * and AP queue indexes comprising the AP matrix are not configured for another
+ * Verifies that each APQN derived from the Cartesian product of a bitmap of
+ * AP adapter IDs and AP queue indexes is not configured for any matrix
* mediated device. AP queue sharing is not allowed.
*
- * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
-static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+ unsigned long *mdev_aqm)
{
- struct ap_matrix_mdev *lstdev;
+ struct ap_matrix_mdev *matrix_mdev;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
- if (matrix_mdev == lstdev)
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ /*
+ * If the input apm and aqm are fields of the matrix_mdev
+ * object, then move on to the next matrix_mdev.
+ */
+ if (mdev_apm == matrix_mdev->matrix.apm &&
+ mdev_aqm == matrix_mdev->matrix.aqm)
continue;
memset(apm, 0, sizeof(apm));
@@ -698,14 +878,16 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
- if (!bitmap_and(apm, matrix_mdev->matrix.apm,
- lstdev->matrix.apm, AP_DEVICES))
+ if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
+ AP_DEVICES))
continue;
- if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
- lstdev->matrix.aqm, AP_DOMAINS))
+ if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
+ AP_DOMAINS))
continue;
+ vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
+
return -EADDRINUSE;
}
@@ -713,6 +895,41 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
}
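+
+/*
+ * The sharing test above relies on bitmap_and() returning true only when
+ * the intersection is non-empty; a hypothetical stand-alone check for two
+ * matrices reduces to:
+ */
+static inline bool example_apqns_shared(const struct ap_matrix *m1,
+					const struct ap_matrix *m2)
+{
+	DECLARE_BITMAP(apm, AP_DEVICES);
+	DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+	/* APQNs overlap only if both the APID and APQI sets intersect */
+	return bitmap_and(apm, m1->apm, m2->apm, AP_DEVICES) &&
+	       bitmap_and(aqm, m1->aqm, m2->aqm, AP_DOMAINS);
+}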
/**
+ * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
+ * not reserved for the default zcrypt driver and
+ * are not assigned to another mdev.
+ *
+ * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
+ *
+ * Return: One of the following values:
+ * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
+ * most likely -EBUSY indicating the ap_perms_mutex lock is already held.
+ * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
+ *   zcrypt default driver.
+ * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another
+ *   mdev.
+ * o Zero if validation succeeded.
+ */
+static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm))
+ return -EADDRNOTAVAIL;
+
+ return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm);
+}
+
+static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ unsigned long apqi;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
+}
+
+/**
* assign_adapter_store - parses the APID from @buf and sets the
* corresponding bit in the mediated matrix device's APM
*
@@ -741,6 +958,10 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* An APQN derived from the cross product of the APID being assigned
* and the APQIs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * A lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_adapter_store(struct device *dev,
struct device_attribute *attr,
@@ -748,15 +969,11 @@ static ssize_t assign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
+ DECLARE_BITMAP(apm_delta, AP_DEVICES);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -767,33 +984,97 @@ static ssize_t assign_adapter_store(struct device *dev,
goto done;
}
- /*
- * Set the bit in the AP mask (APM) corresponding to the AP adapter
- * number (APID). The bits in the mask, from most significant to least
- * significant bit, correspond to APIDs 0-255.
- */
- ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
- if (ret)
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
goto done;
+ }
- set_bit_inv(apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+ memset(apm_delta, 0, sizeof(apm_delta));
+ set_bit_inv(apid, apm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(apm_delta,
+ matrix_mdev->matrix.aqm, matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apid, matrix_mdev->matrix.apm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
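+
+/*
+ * Hypothetical usage from userspace (the mdev UUID is an example):
+ *
+ *	echo 0x05 > /sys/devices/vfio_ap/matrix/$UUID/assign_adapter
+ *
+ * With the validation above, the write fails with -EADDRINUSE when an APQN
+ * derived from the new APID is already in use by another mdev, and the APM
+ * bit is rolled back.
+ */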
+static struct vfio_ap_queue *
+vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid, unsigned long apqi)
+{
+ struct vfio_ap_queue *q = NULL;
+
+ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
+ /* If the queue is assigned to the matrix mdev, unlink it. */
+ if (q)
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ return q;
+}
+
+/**
+ * vfio_ap_mdev_unlink_adapter - unlink all queues associated with an
+ *				 unassigned adapter from the matrix mdev to
+ *				 which the adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+ * @qtable: table for storing the queues associated with the unassigned
+ *	    adapter.
+ */
+static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+	int loop_cursor;
+	struct hlist_node *tmp;
+	struct vfio_ap_queue *q;
+	struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+	if (!qtable)
+		return;
+
+	hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+	hash_for_each_safe(qtable->queues, loop_cursor, tmp, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
+
/**
* unassign_adapter_store - parses the APID from @buf and clears the
* corresponding bit in the mediated matrix device's APM
@@ -817,13 +1098,7 @@ static ssize_t unassign_adapter_store(struct device *dev,
unsigned long apid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -835,31 +1110,22 @@ static ssize_t unassign_adapter_store(struct device *dev,
}
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apqi)
+static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
{
- int ret;
unsigned long apid;
- unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(NULL, &apqi);
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
-
- return 0;
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
}
/**
@@ -891,6 +1157,10 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
* An APQN derived from the cross product of the APQI being assigned
* and the APIDs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * The lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_domain_store(struct device *dev,
struct device_attribute *attr,
@@ -898,47 +1168,89 @@ static ssize_t assign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
+ DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If the KVM guest is running, disallow assignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
goto done;
- if (apqi > max_apqi) {
+
+ if (apqi > matrix_mdev->matrix.aqm_max) {
ret = -ENODEV;
goto done;
}
- ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
- if (ret)
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
goto done;
+ }
- set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+ memset(aqm_delta, 0, sizeof(aqm_delta));
+ set_bit_inv(apqi, aqm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_domain);
+static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+	int loop_cursor;
+	struct hlist_node *tmp;
+	struct vfio_ap_queue *q;
+	struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+	if (!qtable)
+		return;
+
+	hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+	hash_for_each_safe(qtable->queues, loop_cursor, tmp, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
/**
* unassign_domain_store - parses the APQI from @buf and clears the
@@ -963,13 +1275,7 @@ static ssize_t unassign_domain_store(struct device *dev,
unsigned long apqi;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
@@ -981,10 +1287,11 @@ static ssize_t unassign_domain_store(struct device *dev,
}
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
@@ -1011,13 +1318,7 @@ static ssize_t assign_control_domain_store(struct device *dev,
unsigned long id;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &id);
if (ret)
@@ -1034,9 +1335,12 @@ static ssize_t assign_control_domain_store(struct device *dev,
* number of control domains that can be assigned.
*/
set_bit_inv(id, matrix_mdev->matrix.adm);
+ if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
@@ -1062,28 +1366,28 @@ static ssize_t unassign_control_domain_store(struct device *dev,
int ret;
unsigned long domid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_domid = matrix_mdev->matrix.adm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If a KVM guest is running, disallow unassignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &domid);
if (ret)
goto done;
- if (domid > max_domid) {
+
+ if (domid > matrix_mdev->matrix.adm_max) {
ret = -ENODEV;
goto done;
}
clear_bit_inv(domid, matrix_mdev->matrix.adm);
+
+ if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
+ clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);
@@ -1099,40 +1403,36 @@ static ssize_t control_domains_show(struct device *dev,
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
n = sprintf(bufpos, "%04lx\n", id);
bufpos += n;
nchars += n;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(control_domains);
-static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
- struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
unsigned long apqi1;
- unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
- unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ unsigned long napm_bits = matrix->apm_max + 1;
+ unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
int n;
- apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
- apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
-
- mutex_lock(&matrix_dev->lock);
+ apid1 = find_first_bit_inv(matrix->apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm,
naqm_bits) {
n = sprintf(bufpos, "%02lx.%04lx\n", apid,
apqi);
@@ -1141,25 +1441,50 @@ static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
}
}
} else if (apid1 < napm_bits) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
n = sprintf(bufpos, "%02lx.\n", apid);
bufpos += n;
nchars += n;
}
} else if (apqi1 < naqm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
n = sprintf(bufpos, ".%04lx\n", apqi);
bufpos += n;
nchars += n;
}
}
- mutex_unlock(&matrix_dev->lock);
+ return nchars;
+}
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(matrix);
+static ssize_t guest_matrix_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(guest_matrix);
+
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
&dev_attr_unassign_adapter.attr,
@@ -1169,6 +1494,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_unassign_control_domain.attr,
&dev_attr_control_domains.attr,
&dev_attr_matrix.attr,
+ &dev_attr_guest_matrix.attr,
NULL,
};
@@ -1201,59 +1527,32 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
if (m != matrix_mdev && m->kvm == kvm) {
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
return -EPERM;
}
}
kvm_get_kvm(kvm);
matrix_mdev->kvm = kvm;
- kvm_arch_crypto_set_masks(kvm,
- matrix_mdev->matrix.apm,
- matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
return 0;
}
-/**
- * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
- *
- * @nb: The notifier block
- * @action: Action to be taken
- * @data: data associated with the request
- *
- * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
- * pinned before). Other requests are ignored.
- *
- * Return: for an UNMAP request, NOFITY_OK; otherwise NOTIFY_DONE.
- */
-static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
+ u64 length)
{
- struct ap_matrix_mdev *matrix_mdev;
-
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
-
- vfio_unpin_pages(&matrix_mdev->vdev, &g_pfn, 1);
- return NOTIFY_OK;
- }
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
- return NOTIFY_DONE;
+ vfio_unpin_pages(&matrix_mdev->vdev, iova, 1);
}
/**
@@ -1271,36 +1570,36 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
kvm->arch.crypto.pqap_hook = NULL;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
kvm_arch_crypto_clear_masks(kvm);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
kvm_put_kvm(kvm);
matrix_mdev->kvm = NULL;
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
}
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
- struct device *dev;
+ struct ap_queue *queue;
struct vfio_ap_queue *q = NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (dev) {
- q = dev_get_drvdata(dev);
- put_device(dev);
- }
+ queue = ap_get_qdev(apqn);
+ if (!queue)
+ return NULL;
+
+ if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
+ q = dev_get_drvdata(&queue->ap_dev.device);
+
+ put_device(&queue->ap_dev.device);
return q;
}
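+
+/*
+ * Note on the reference handling above: ap_get_qdev() returns the ap_queue
+ * with a device reference held, so the reference is dropped with
+ * put_device() once the driver check and the drvdata lookup are done; the
+ * vfio_ap_queue itself is protected by the driver's locking scheme rather
+ * than by a reference count.
+ */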
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry)
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry)
{
struct ap_queue_status status;
int ret;
@@ -1308,9 +1607,9 @@ int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
if (!q)
return 0;
-
retry_zapq:
status = ap_zapq(q->apqn);
+ q->reset_rc = status.response_code;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
ret = 0;
@@ -1325,12 +1624,17 @@ retry_zapq:
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
- WARN_ON_ONCE(status.irq_enabled);
+ WARN_ONCE(status.irq_enabled,
+ "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
+ status.response_code);
ret = -EBUSY;
goto free_resources;
default:
/* things are really broken, give up */
- WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ WARN(true,
+ "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.response_code);
return -EIO;
}
@@ -1342,7 +1646,8 @@ retry_zapq:
msleep(20);
status = ap_tapq(q->apqn, NULL);
}
- WARN_ON_ONCE(retry2 <= 0);
+ WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn));
free_resources:
vfio_ap_free_aqic_resources(q);
@@ -1350,27 +1655,20 @@ free_resources:
return ret;
}
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
- int ret;
- int rc = 0;
- unsigned long apid, apqi;
+ int ret, loop_cursor, rc = 0;
struct vfio_ap_queue *q;
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
- matrix_mdev->matrix.apm_max + 1) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.aqm_max + 1) {
- q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
- ret = vfio_ap_mdev_reset_queue(q, 1);
- /*
- * Regardless whether a queue turns out to be busy, or
- * is not operational, we need to continue resetting
- * the remaining queues.
- */
- if (ret)
- rc = ret;
- }
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ ret = vfio_ap_mdev_reset_queue(q, 1);
+ /*
+ * Regardless whether a queue turns out to be busy, or
+ * is not operational, we need to continue resetting
+ * the remaining queues.
+ */
+ if (ret)
+ rc = ret;
}
return rc;
@@ -1380,27 +1678,11 @@ static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- unsigned long events;
- int ret;
if (!vdev->kvm)
return -EINVAL;
- ret = vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
- if (ret)
- return ret;
-
- matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events,
- &matrix_mdev->iommu_notifier);
- if (ret)
- goto err_kvm;
- return 0;
-
-err_kvm:
- vfio_ap_mdev_unset_kvm(matrix_mdev);
- return ret;
+ return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}
static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
@@ -1408,8 +1690,6 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &matrix_mdev->iommu_notifier);
vfio_ap_mdev_unset_kvm(matrix_mdev);
}
@@ -1440,27 +1720,84 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
container_of(vdev, struct ap_matrix_mdev, vdev);
int ret;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
- ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return ret;
}
+static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long apid = AP_QID_CARD(q->apqn);
+ unsigned long apqi = AP_QID_QUEUE(q->apqn);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
+ test_bit_inv(apqi, matrix_mdev->matrix.aqm))
+ return matrix_mdev;
+ }
+
+ return NULL;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars = 0;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct ap_device *apdev = to_ap_dev(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ q = dev_get_drvdata(&apdev->device);
+ matrix_mdev = vfio_ap_mdev_for_queue(q);
+
+ if (matrix_mdev) {
+ if (matrix_mdev->kvm)
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_IN_USE);
+ else
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_ASSIGNED);
+ } else {
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_UNASSIGNED);
+ }
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+
+static DEVICE_ATTR_RO(status);
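+
+/*
+ * Hypothetical usage (the queue device name is an example):
+ *
+ *	$ cat /sys/bus/ap/devices/04.0005/status
+ *	assigned
+ *
+ * "in use" is reported once the mdev owning the queue is attached to a KVM
+ * guest.
+ */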
+
+static struct attribute *vfio_queue_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group vfio_queue_attr_group = {
+ .attrs = vfio_queue_attrs,
+};
+
static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.open_device = vfio_ap_mdev_open_device,
.close_device = vfio_ap_mdev_close_device,
.ioctl = vfio_ap_mdev_ioctl,
+ .dma_unmap = vfio_ap_mdev_dma_unmap,
};
static struct mdev_driver vfio_ap_matrix_driver = {
@@ -1500,3 +1837,432 @@ void vfio_ap_mdev_unregister(void)
mdev_unregister_device(&matrix_dev->device);
mdev_unregister_driver(&vfio_ap_matrix_driver);
}
+
+int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+{
+ int ret;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ if (ret)
+ return ret;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ matrix_mdev = get_update_locks_by_apqn(q->apqn);
+
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+ return 0;
+}
+
+void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+{
+ unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
+
+ if (matrix_mdev) {
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+
+ /*
+		 * If the queue is assigned to the guest's APCB, then remove
+		 * the adapter's APID from the APCB and hot unplug the adapter
+		 * from the guest.
+ */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ }
+
+ vfio_ap_mdev_reset_queue(q, 1);
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
+ * assigned to a mediated device under the control
+ * of the vfio_ap device driver.
+ *
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
+ *
+ * Return:
+ * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
+ * assigned to a mediated device under the control of the vfio_ap
+ * device driver.
+ * * Otherwise, return 0.
+ */
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
+{
+ int ret;
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+
+ return ret;
+}
+
+/**
+ * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
+ * domains that have been removed from the host's
+ * AP configuration from a guest.
+ *
+ * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
+ * @aprem: the adapters that have been removed from the host's AP configuration
+ * @aqrem: the domains that have been removed from the host's AP configuration
+ * @cdrem: the control domains that have been removed from the host's AP
+ * configuration.
+ */
+static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *aprem,
+ unsigned long *aqrem,
+ unsigned long *cdrem)
+{
+ int do_hotplug = 0;
+
+ if (!bitmap_empty(aprem, AP_DEVICES)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.apm,
+ aprem, AP_DEVICES);
+ }
+
+ if (!bitmap_empty(aqrem, AP_DOMAINS)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.aqm,
+					    aqrem, AP_DOMAINS);
+ }
+
+ if (!bitmap_empty(cdrem, AP_DOMAINS))
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
+ matrix_mdev->shadow_apcb.adm,
+ cdrem, AP_DOMAINS);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+}
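+
+/*
+ * Note on the bitmap semantics assumed above: bitmap_andnot() computes
+ * dst = src1 & ~src2 and returns true when the result is non-empty, so
+ * do_hotplug ends up set whenever a stripped shadow mask still has bits
+ * remaining to present to the guest.
+ */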
+
+/**
+ * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
+ * domains and control domains that have been removed
+ * from the host AP configuration and unplugs them
+ * from those guests.
+ *
+ * @ap_remove: bitmap specifying which adapters have been removed from the host
+ * config.
+ * @aq_remove: bitmap specifying which domains have been removed from the host
+ * config.
+ * @cd_remove: bitmap specifying which control domains have been removed from
+ * the host config.
+ */
+static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
+ unsigned long *aq_remove,
+ unsigned long *cd_remove)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+ int do_remove = 0;
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ do_remove |= bitmap_and(aprem, ap_remove,
+ matrix_mdev->matrix.apm,
+ AP_DEVICES);
+ do_remove |= bitmap_and(aqrem, aq_remove,
+ matrix_mdev->matrix.aqm,
+ AP_DOMAINS);
+		do_remove |= bitmap_and(cdrem, cd_remove,
+					matrix_mdev->matrix.adm,
+					AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
+ cdrem);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
+ * control domains from the host AP configuration
+ * by unplugging them from the guests that are
+ * using them.
+ * @cur_config_info: the current host AP configuration information
+ * @prev_config_info: the previous host AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ int do_remove;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+
+ do_remove = bitmap_andnot(aprem,
+ (unsigned long *)prev_config_info->apm,
+ (unsigned long *)cur_config_info->apm,
+ AP_DEVICES);
+ do_remove |= bitmap_andnot(aqrem,
+ (unsigned long *)prev_config_info->aqm,
+ (unsigned long *)cur_config_info->aqm,
+				   AP_DOMAINS);
+ do_remove |= bitmap_andnot(cdrem,
+ (unsigned long *)prev_config_info->adm,
+ (unsigned long *)cur_config_info->adm,
+				   AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
+}
+
+/**
+ * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
+ * are older than AP type 10 (CEX4).
+ * @apm: a bitmap of the APIDs to examine
+ * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
+ */
+static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
+{
+ bool apid_cleared;
+ struct ap_queue_status status;
+ unsigned long apid, apqi, info;
+ int qtype, qtype_mask = 0xff000000;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ apid_cleared = false;
+
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
+ switch (status.response_code) {
+ /*
+			 * According to the architecture, the queue's
+			 * info is filled in each of the cases below.
+ */
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_BUSY:
+ qtype = info & qtype_mask;
+
+ /*
+ * The vfio_ap device driver only
+ * supports CEX4 and newer adapters, so
+ * remove the APID if the adapter is
+ * older than a CEX4.
+ */
+ if (qtype < AP_DEVICE_TYPE_CEX4) {
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ }
+
+ break;
+
+ default:
+ /*
+ * If we don't know the adapter type,
+ * clear its APID since it can't be
+ * determined whether the vfio_ap
+ * device driver supports it.
+ */
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ break;
+ }
+
+ /*
+			 * If we've already cleared the APID from the apm, there
+			 * is no need to continue examining the remaining AP
+			 * queues to determine the type of the adapter.
+			 */
+			if (apid_cleared)
+				break;
+ }
+ }
+}
+
+/**
+ * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
+ * control domains that have been added to the host's
+ * AP configuration for each matrix mdev to which they
+ * are assigned.
+ *
+ * @apm_add: a bitmap specifying the adapters that have been added to the AP
+ * configuration.
+ * @aqm_add: a bitmap specifying the domains that have been added to the AP
+ * configuration.
+ * @adm_add: a bitmap specifying the control domains that have been added to the
+ * AP configuration.
+ */
+static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
+ unsigned long *adm_add)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (list_empty(&matrix_dev->mdev_list))
+ return;
+
+ vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ bitmap_and(matrix_mdev->apm_add,
+ matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
+ bitmap_and(matrix_mdev->aqm_add,
+ matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
+ bitmap_and(matrix_mdev->adm_add,
+			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
+ * control domains to the host AP configuration
+ * by updating the bitmaps that specify what adapters,
+ * domains and control domains have been added so they
+ * can be hot plugged into the guest when the AP bus
+ * scan completes (see vfio_ap_on_scan_complete
+ * function).
+ * @cur_config_info: the current AP configuration information
+ * @prev_config_info: the previous AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ bool do_add;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
+
+ do_add = bitmap_andnot(apm_add,
+ (unsigned long *)cur_config_info->apm,
+ (unsigned long *)prev_config_info->apm,
+ AP_DEVICES);
+ do_add |= bitmap_andnot(aqm_add,
+ (unsigned long *)cur_config_info->aqm,
+ (unsigned long *)prev_config_info->aqm,
+ AP_DOMAINS);
+ do_add |= bitmap_andnot(adm_add,
+ (unsigned long *)cur_config_info->adm,
+ (unsigned long *)prev_config_info->adm,
+ AP_DOMAINS);
+
+ if (do_add)
+ vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
+}
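The bitmap_andnot() calls in this function implement "newly added = current & ~previous"; the return value is true when any bit survives, which is what drives do_add. The same idiom in isolation (hypothetical helper, standard <linux/bitmap.h> API):

	/* Hypothetical helper: true iff 'cur' has APID bits not in 'prev'. */
	static bool apids_were_added(const unsigned long *prev,
				     const unsigned long *cur)
	{
		DECLARE_BITMAP(added, AP_DEVICES);

		/* added = cur & ~prev; a non-empty result returns true */
		return bitmap_andnot(added, cur, prev, AP_DEVICES);
	}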
+
+/**
+ * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
+ * configuration.
+ *
+ * @cur_cfg_info: the current host AP configuration
+ * @prev_cfg_info: the previous host AP configuration
+ */
+void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+ struct ap_config_info *prev_cfg_info)
+{
+ if (!cur_cfg_info || !prev_cfg_info)
+ return;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
+ vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
+ memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+{
+ bool do_hotplug = false;
+ int filter_domains = 0;
+ int filter_adapters = 0;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+ matrix_mdev->apm_add, AP_DEVICES);
+ filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ matrix_mdev->aqm_add, AP_DOMAINS);
+
+ if (filter_adapters && filter_domains)
+ do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+ else if (filter_adapters)
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev);
+ else
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+ aqm, matrix_mdev);
+
+ if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+ AP_DOMAINS))
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+}
+
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
+ bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
+ bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
+ continue;
+
+ vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
+ bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
+ bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
+ bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
+ }
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
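The handlers above fix a single nesting order for the new locks: matrix_dev->guests_lock first, then (in vfio_ap_mdev_hot_plug_cfg()) matrix_mdev->kvm->lock, then matrix_dev->mdevs_lock. New code touching both the mdev list and guest state should follow the same order; a skeleton of that ordering (illustration only; assumes matrix_mdev->kvm is non-NULL):

	/* Illustrative lock-ordering skeleton, not a function from this patch. */
	mutex_lock(&matrix_dev->guests_lock);	/* 1: guest/mdev-list lock */
	mutex_lock(&matrix_mdev->kvm->lock);	/* 2: KVM guest state */
	mutex_lock(&matrix_dev->mdevs_lock);	/* 3: per-mdev driver state */

	/* ... refresh the shadow APCB and hot (un)plug queues ... */

	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);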
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index a26efd804d0d..d782cf463eab 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/hashtable.h>
#include "ap_bus.h"
@@ -32,20 +33,26 @@
* @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
* @mdev_list: the list of mediated matrix devices created
- * @lock: mutex for locking the AP matrix device. This lock will be
+ * @mdevs_lock: mutex for locking the AP matrix device. This lock will be
* taken every time we fiddle with state managed by the vfio_ap
* driver, be it using @mdev_list or writing the state of a
* single ap_matrix_mdev device. It's quite coarse but we don't
* expect much contention.
* @vfio_ap_drv: the vfio_ap device driver
+ * @guests_lock: mutex for controlling access to a guest that is using AP
+ * devices passed through by the vfio_ap device driver. This lock
+ * will be taken when the AP devices are plugged into or unplugged
+ * from a guest, and when an ap_matrix_mdev device is added to or
+ * removed from @mdev_list or the list is iterated.
*/
struct ap_matrix_dev {
struct device device;
atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
- struct mutex lock;
+ struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */
struct ap_driver *vfio_ap_drv;
+ struct mutex guests_lock; /* serializes access to each KVM guest */
};
extern struct ap_matrix_dev *matrix_dev;
@@ -75,48 +82,77 @@ struct ap_matrix {
};
/**
+ * struct ap_queue_table - a table of queue objects.
+ *
+ * @queues: a hashtable of queues (struct vfio_ap_queue).
+ */
+struct ap_queue_table {
+ DECLARE_HASHTABLE(queues, 8);
+};
+
+/**
* struct ap_matrix_mdev - Contains the data associated with a matrix mediated
* device.
* @vdev: the vfio device
* @node: allows the ap_matrix_mdev struct to be added to a list
* @matrix: the adapters, usage domains and control domains assigned to the
* mediated matrix device.
- * @iommu_notifier: notifier block used for specifying callback function for
- * handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP even
+ * @shadow_apcb: the shadow copy of the APCB field of the KVM guest's CRYCB
* @kvm: the struct holding guest's state
* @pqap_hook: the function pointer to the interception handler for the
* PQAP(AQIC) instruction.
* @mdev: the mediated device
+ * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
+ * @apm_add: bitmap of APIDs added to the host's AP configuration
+ * @aqm_add: bitmap of APQIs added to the host's AP configuration
+ * @adm_add: bitmap of control domain numbers added to the host's AP
+ * configuration
*/
struct ap_matrix_mdev {
struct vfio_device vdev;
struct list_head node;
struct ap_matrix matrix;
- struct notifier_block iommu_notifier;
+ struct ap_matrix shadow_apcb;
struct kvm *kvm;
crypto_hook pqap_hook;
struct mdev_device *mdev;
+ struct ap_queue_table qtable;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
};
/**
* struct vfio_ap_queue - contains the data associated with a queue bound to the
* vfio_ap device driver
* @matrix_mdev: the matrix mediated device
- * @saved_pfn: the guest PFN pinned for the guest
+ * @saved_iova: the notification indicator byte (nib) address
* @apqn: the APQN of the AP queue device
* @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
+ * @reset_rc: the status response code from the last reset of the queue
*/
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
- unsigned long saved_pfn;
+ dma_addr_t saved_iova;
int apqn;
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
+ unsigned int reset_rc;
};
int vfio_ap_mdev_register(void);
void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry);
+
+int vfio_ap_mdev_probe_queue(struct ap_device *queue);
+void vfio_ap_mdev_remove_queue(struct ap_device *queue);
+
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm);
+
+void vfio_ap_on_cfg_changed(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
#endif /* _VFIO_AP_PRIVATE_H_ */
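The qtable introduced above keys struct vfio_ap_queue objects by APQN via the generic <linux/hashtable.h> API. Roughly how a lookup is expected to work (sketch; the helper name is hypothetical and the real code lives in vfio_ap_ops.c):

/* Insertion side: hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn); */
static struct vfio_ap_queue *
vfio_ap_find_queue_sketch(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	/* walk only the bucket that hashes to this APQN */
	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode, apqn) {
		if (q->apqn == apqn)
			return q;
	}

	return NULL;
}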
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 35d4b398c197..8bd9fd51208c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -763,6 +763,49 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
ipa_name, com, CARD_DEVID(card));
}
+static void qeth_default_link_info(struct qeth_card *card)
+{
+ struct qeth_link_info *link_info = &card->info.link_info;
+
+ QETH_CARD_TEXT(card, 2, "dftlinfo");
+ link_info->duplex = DUPLEX_FULL;
+
+ if (IS_IQD(card) || IS_VM_NIC(card)) {
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ } else {
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ link_info->speed = SPEED_100;
+ link_info->port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ link_info->speed = SPEED_25000;
+ link_info->port = PORT_FIBRE;
+ break;
+ default:
+ dev_info(&card->gdev->dev,
+ "Unknown link type %x\n",
+ card->info.link_type);
+ link_info->speed = SPEED_UNKNOWN;
+ link_info->port = PORT_OTHER;
+ }
+
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ }
+}
+
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
@@ -790,6 +833,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
netdev_name(card->dev), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
+ qeth_default_link_info(card);
}
return NULL;
case IPA_CMD_STARTLAN:
@@ -4744,92 +4788,6 @@ out_free:
return rc;
}
-static int qeth_query_card_info_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
-{
- struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
- struct qeth_link_info *link_info = reply->param;
- struct qeth_query_card_info *card_info;
-
- QETH_CARD_TEXT(card, 2, "qcrdincb");
- if (qeth_setadpparms_inspect_rc(cmd))
- return -EIO;
-
- card_info = &cmd->data.setadapterparms.data.card_info;
- netdev_dbg(card->dev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- card_info->card_type, card_info->port_mode,
- card_info->port_speed);
-
- switch (card_info->port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- link_info->duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- link_info->duplex = DUPLEX_HALF;
- break;
- default:
- link_info->duplex = DUPLEX_UNKNOWN;
- }
-
- switch (card_info->card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_TP;
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_FIBRE;
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- link_info->speed = SPEED_10000;
- link_info->port = PORT_FIBRE;
- break;
- default:
- switch (card_info->port_speed) {
- case CARD_INFO_PORTS_10M:
- link_info->speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- link_info->speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- link_info->speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- link_info->speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- link_info->speed = SPEED_25000;
- break;
- default:
- link_info->speed = SPEED_UNKNOWN;
- }
-
- link_info->port = PORT_OTHER;
- }
-
- return 0;
-}
-
-int qeth_query_card_info(struct qeth_card *card,
- struct qeth_link_info *link_info)
-{
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 2, "qcrdinfo");
- if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
- return -EOPNOTSUPP;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
- if (!iob)
- return -ENOMEM;
-
- return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
-}
-
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_reply *reply_priv,
unsigned long data)
@@ -4839,6 +4797,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_query_oat_physical_if *phys_if;
struct qeth_query_oat_reply *reply;
+ QETH_CARD_TEXT(card, 2, "qoatincb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
@@ -4918,41 +4877,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
static void qeth_init_link_info(struct qeth_card *card)
{
- card->info.link_info.duplex = DUPLEX_FULL;
-
- if (IS_IQD(card) || IS_VM_NIC(card)) {
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
- } else {
- switch (card->info.link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- card->info.link_info.speed = SPEED_100;
- card->info.link_info.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- card->info.link_info.speed = SPEED_1000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- card->info.link_info.speed = SPEED_25000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- default:
- dev_info(&card->gdev->dev, "Unknown link type %x\n",
- card->info.link_type);
- card->info.link_info.speed = SPEED_UNKNOWN;
- card->info.link_info.port = PORT_OTHER;
- }
-
- card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
- }
+ qeth_default_link_info(card);
/* Get more accurate data via QUERY OAT: */
if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
@@ -5461,6 +5386,7 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
qeth_clear_working_pool_list(card);
qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
+ qeth_default_link_info(card);
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index b0b36b2132fe..9eba0a32e9f9 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -428,8 +428,8 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct qeth_card *card = netdev->ml_priv;
- struct qeth_link_info link_info;
+ QETH_CARD_TEXT(card, 4, "ethtglks");
cmd->base.speed = card->info.link_info.speed;
cmd->base.duplex = card->info.link_info.duplex;
cmd->base.port = card->info.link_info.port;
@@ -439,16 +439,6 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- /* Check if we can obtain more accurate information. */
- if (!qeth_query_card_info(card, &link_info)) {
- if (link_info.speed != SPEED_UNKNOWN)
- cmd->base.speed = link_info.speed;
- if (link_info.duplex != DUPLEX_UNKNOWN)
- cmd->base.duplex = link_info.duplex;
- if (link_info.port != PORT_OTHER)
- cmd->base.port = link_info.port;
- }
-
qeth_set_ethtool_link_modes(cmd, card->info.link_info.link_mode);
return 0;
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index da55133da8fe..15c25fefe91a 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -2,7 +2,7 @@
/*
* zfcp device driver
*
- * Definitions for handling diagnostics in the the zfcp device driver.
+ * Definitions for handling diagnostics in the zfcp device driver.
*
* Copyright IBM Corp. 2018, 2020
*/
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 511bf8e0a436..b61acbb09be3 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -145,27 +145,33 @@ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
+ int ret = -EIO;
+
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
- if (zfcp_fsf_open_wka_port(wka_port))
+ if (zfcp_fsf_open_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+ goto out;
+ }
}
- mutex_unlock(&wka_port->mutex);
-
- wait_event(wka_port->completion_wq,
+ wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
- return 0;
+ ret = 0;
+ goto out;
}
- return -EIO;
+out:
+ mutex_unlock(&wka_port->mutex);
+ return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
@@ -181,9 +187,12 @@ static void zfcp_fc_wka_port_offline(struct work_struct *work)
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ goto out;
}
+ wait_event(wka_port->closed,
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
@@ -193,13 +202,15 @@ static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
- schedule_delayed_work(&wka_port->work, HZ / 100);
+ queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
+ msecs_to_jiffies(10));
}
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
- init_waitqueue_head(&wka_port->completion_wq);
+ init_waitqueue_head(&wka_port->opened);
+ init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 8aaf409ce9cb..97755407ce1b 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -185,7 +185,8 @@ enum zfcp_fc_wka_status {
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
- * @completion_wq: Wait for completion of open/close command
+ * @opened: Wait for completion of open command
+ * @closed: Wait for completion of close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
@@ -195,7 +196,8 @@ enum zfcp_fc_wka_status {
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
+ wait_queue_head_t opened;
+ wait_queue_head_t closed;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
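Splitting @completion_wq into @opened and @closed makes each waiter sleep on exactly the transition it cares about, with @status as the wait condition. Condensed from the zfcp_fc.c/zfcp_fsf.c hunks in this patch (open side shown; the close side is symmetric):

	/* requester: sleep until the open handler publishes a final state */
	wait_event(wka_port->opened,
		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

	/* FSF handler: set the new state, then wake only the open waiters */
	wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
	wake_up(&wka_port->opened);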
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4f1e4385ce58..19223b075568 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1907,7 +1907,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->opened);
}
/**
@@ -1966,7 +1966,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->closed);
}
/**
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index dbf3e50444e6..cb67fa80fb12 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -672,7 +672,7 @@ ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
atomic_read(&zfcp_sdev->status));
-struct attribute *zfcp_sdev_attrs[] = {
+static struct attribute *zfcp_sdev_attrs[] = {
&dev_attr_fcp_lun.attr,
&dev_attr_wwpn.attr,
&dev_attr_hba_id.attr,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index aa96f67dd0b1..a10dbe632ef9 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -532,6 +532,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
+
+ vq->num_max = info->num;
+
/* it may have been reduced */
info->num = virtqueue_get_vring_size(vq);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index a897c8f914cf..f2abffce2659 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2515,12 +2515,26 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
return (hoststatus << 16) | tgt_status;
}
+/*
+ * Turn the DMA address from an inbox into a CCB pointer by
+ * walking the adapter's CCB list. This is rather inefficient.
+ */
+static struct blogic_ccb *
+blogic_inbox_to_ccb(struct blogic_adapter *adapter, struct blogic_inbox *inbox)
+{
+ struct blogic_ccb *ccb;
+
+ for (ccb = adapter->all_ccbs; ccb; ccb = ccb->next_all)
+ if (inbox->ccb == ccb->dma_handle)
+ break;
+
+ return ccb;
+}
/*
blogic_scan_inbox scans the Incoming Mailboxes saving any
Incoming Mailbox entries for completion processing.
*/
-
static void blogic_scan_inbox(struct blogic_adapter *adapter)
{
/*
@@ -2540,17 +2554,14 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
enum blogic_cmplt_code comp_code;
while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) {
- /*
- We are only allowed to do this because we limit our
- architectures we run on to machines where bus_to_virt(
- actually works. There *needs* to be a dma_addr_to_virt()
- in the new PCI DMA mapping interface to replace
- bus_to_virt() or else this code is going to become very
- innefficient.
- */
- struct blogic_ccb *ccb =
- (struct blogic_ccb *) bus_to_virt(next_inbox->ccb);
- if (comp_code != BLOGIC_CMD_NOTFOUND) {
+ struct blogic_ccb *ccb = blogic_inbox_to_ccb(adapter, next_inbox);
+ if (!ccb) {
+ /*
+ * This should never happen, unless the CCB list is
+ * corrupted in memory.
+ */
+ blogic_warn("Could not find CCB for dma address %x\n", adapter, next_inbox->ccb);
+ } else if (comp_code != BLOGIC_CMD_NOTFOUND) {
if (ccb->status == BLOGIC_CCB_ACTIVE ||
ccb->status == BLOGIC_CCB_RESET) {
/*
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 90253208a72f..3d9c56ac8224 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1712,7 +1712,7 @@ static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
static int FlashPoint_HandleInterrupt(void *pcard)
{
struct sccb *currSCCB;
- unsigned char thisCard, result, bm_status, bm_int_st;
+ unsigned char thisCard, result, bm_status;
unsigned short hp_int;
unsigned char i, target;
struct sccb_card *pCurrCard = pcard;
@@ -1723,7 +1723,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
MDISABLE_INT(ioport);
- if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
+ if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON)
bm_status = RD_HARPOON(ioport + hp_ext_status) &
(unsigned char)BAD_EXT_STATUS;
else
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a9fe5152addd..955cb69a5418 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -458,17 +458,6 @@ config SCSI_MVUMI
To compile this driver as a module, choose M here: the
module will be called mvumi.
-config SCSI_DPT_I2O
- tristate "Adaptec I2O RAID support "
- depends on SCSI && PCI && VIRT_TO_BUS
- help
- This driver supports all of Adaptec's I2O based RAID controllers as
- well as the DPT SmartRaid V cards. This is an Adaptec maintained
- driver by Deanna Bonds. See <file:Documentation/scsi/dpti.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called dpt_i2o.
-
config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
depends on SCSI
@@ -513,7 +502,7 @@ config SCSI_HPTIOP
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on PCI && SCSI && VIRT_TO_BUS
+ depends on PCI && SCSI
help
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2ad3bc052531..f055bfd54a68 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -63,7 +63,6 @@ obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
-obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index cf703a1ecdda..74312400468b 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -24,8 +24,11 @@
struct a2091_hostdata {
struct WD33C93_hostdata wh;
struct a2091_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) (((d) == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a2091_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -45,15 +48,31 @@ static irqreturn_t a2091_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -64,8 +83,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ wh->dma_bounce_len, DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
/* the bounce buffer may not be in the first 16M of physmem */
if (addr & A2091_XFER_MASK) {
@@ -76,11 +108,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
- scsi_pointer->this_residual);
- }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -95,13 +123,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
+
/* start DMA */
regs->ST_DMA = 1;
@@ -142,6 +165,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* restore the CONTROL bits (minus the direction flag) */
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir)
@@ -178,6 +205,11 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wd33c93_regs wdregs;
struct a2091_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24))) {
+ dev_warn(&z->dev, "cannot use 24 bit DMA\n");
+ return -ENODEV;
+ }
+
if (!request_mem_region(z->resource.start, 256, "wd33c93"))
return -EBUSY;
@@ -198,6 +230,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &z->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
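The a2091 conversion follows the standard streaming-DMA contract: check every dma_map_single() with dma_mapping_error() and balance it with a dma_unmap_single() of the same size and direction, which also covers the cache maintenance that cache_clear()/cache_push() used to do by hand. The skeleton, stripped of driver specifics ('dev', 'buf' and 'len' are placeholders):

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return 1;	/* cannot map: fall back to PIO, as dma_setup() does */

	/* ... program the DMA controller with 'addr' and run the transfer ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);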
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index dd161885eed1..2c5cb1a02e86 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
@@ -25,8 +26,11 @@
struct a3000_hostdata {
struct WD33C93_hostdata wh;
struct a3000_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) (((d) == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a3000_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -49,20 +53,38 @@ static irqreturn_t a3000_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/*
* if the physical address has the wrong alignment, or if
* physical address is bad, or if it is a write and at the
* end of a physical memory chunk, then allocate a bounce
* buffer
+ * MSch 20220629 - only wrong alignment tested - bounce
+ * buffer returned by kmalloc is guaranteed to be aligned
*/
if (addr & A3000_XFER_MASK) {
+ WARN_ONCE(1, "Invalid alignment for DMA!");
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -70,6 +92,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* can't allocate memory; use PIO */
if (!wh->dma_bounce_buffer) {
wh->dma_bounce_len = 0;
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
return 1;
}
@@ -79,7 +102,15 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
scsi_pointer->this_residual);
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -94,13 +125,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
/* start DMA */
mb(); /* make sure setup is completed */
@@ -151,6 +176,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
mb(); /* make sure CNTR is updated before next IO */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (SCpnt) {
@@ -193,6 +222,11 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wd33c93_regs wdregs;
struct a3000_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
+ return -ENODEV;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
@@ -216,6 +250,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &pdev->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 81462f4ddb90..4d4cb47b3846 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1050,7 +1050,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
vpdpage83data.type1.productid));
/* Convert to ascii based serial number.
- * The LSB is the the end.
+ * The LSB is the end.
*/
for (i = 0; i < 8; i++) {
u8 temp =
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 73506a459bf8..91d196f26b76 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -159,7 +159,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
flags |= OPEN_REQUIRED;
if ((dev->dev_type == SAS_SATA_DEV) ||
(dev->tproto & SAS_PROTOCOL_STP)) {
- struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
+ struct smp_rps_resp *rps_resp = &dev->sata_dev.rps_resp;
if (rps_resp->frame_type == SMP_RESPONSE &&
rps_resp->function == SMP_REPORT_PHY_SATA &&
rps_resp->result == SMP_RESP_FUNC_ACC) {
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 3bb0adefbe06..50a577ac3bb4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -231,6 +231,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
+completion_check:
/* check if we raced, task just got cleaned up under us */
spin_lock_bh(&session->back_lock);
if (!abrt_task || !abrt_task->sc) {
@@ -238,7 +239,13 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(abrt_task);
+ if (!iscsi_get_task(abrt_task)) {
+ spin_unlock_bh(&session->back_lock);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(5);
+ goto completion_check;
+ }
+
abrt_io_task = abrt_task->dd_data;
conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
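iscsi_get_task() now returns failure once the task's refcount has dropped to zero, so the abort path retries around the narrow window in which iscsi_free_task() is tearing the task down. The pattern, reduced to a skeleton (ordering slightly simplified relative to the hunk above):

retry:
	spin_lock_bh(&session->back_lock);
	if (!task || !task->sc) {
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;		/* task already completed under us */
	}
	if (!iscsi_get_task(task)) {
		spin_unlock_bh(&session->back_lock);
		udelay(5);		/* let iscsi_free_task() finish */
		goto retry;
	}
	spin_unlock_bh(&session->back_lock);
	/* ... issue the abort; drop the ref with iscsi_put_task() when done ... */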
@@ -323,7 +330,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * The task has completed in the driver and is
+ * completing in libiscsi. Just ignore it here. When we
+ * call iscsi_eh_device_reset, it will wait for us.
+ */
+ continue;
+ }
+
io_task = task->dd_data;
/* mark WRB invalid which have been not processed by FW yet */
if (is_chip_be2_be3r(phba)) {
@@ -5745,7 +5760,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
cancel_work_sync(&phba->sess_work);
beiscsi_iface_destroy_default(phba);
- iscsi_host_remove(phba->shost);
+ iscsi_host_remove(phba->shost, false);
beiscsi_disable_port(phba, 1);
/* after cancelling boot_work */
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 15fbd09baa94..a3c800e04a2e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -909,7 +909,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
{
struct Scsi_Host *shost = hba->shost;
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 908854869864..7ab29eaec6f3 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -63,7 +63,7 @@ static int verbose = 1;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose,"be verbose (default: on)");
-static int debug = 0;
+static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
"detailed sense codes on scsi errors (default: off)");
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 4365d52c6430..af281e271f88 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -328,7 +328,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
chba = cdev->hbas[i];
if (chba) {
cdev->hbas[i] = NULL;
- iscsi_host_remove(chba->shost);
+ iscsi_host_remove(chba->shost, false);
pci_dev_put(cdev->pdev);
iscsi_host_free(chba->shost);
}
@@ -1455,7 +1455,7 @@ void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
if (conn) {
log_debug(1 << CXGBI_DBG_SOCK,
"csk 0x%p, cid %d.\n", csk, conn->id);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 244fc27215dc..631eda2d467e 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -16,6 +16,7 @@
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
deleted file mode 100644
index e1fbbf55c09d..000000000000
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ /dev/null
@@ -1,441 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _SCSI_I2O_H
-#define _SCSI_I2O_H
-
-/* I2O kernel space accessible structures/APIs
- *
- * (c) Copyright 1999, 2000 Red Hat Software
- *
- *************************************************************************
- *
- * This header file defined the I2O APIs/structures for use by
- * the I2O kernel modules.
- */
-
-#ifdef __KERNEL__ /* This file to be included by kernel only */
-
-#include <linux/i2o-dev.h>
-
-#include <linux/notifier.h>
-#include <linux/atomic.h>
-
-
-/*
- * Tunable parameters first
- */
-
-/* How many different OSM's are we allowing */
-#define MAX_I2O_MODULES 64
-
-#define I2O_EVT_CAPABILITY_OTHER 0x01
-#define I2O_EVT_CAPABILITY_CHANGED 0x02
-
-#define I2O_EVT_SENSOR_STATE_CHANGED 0x01
-
-//#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */
-
-#define I2O_MAX_MANAGERS 4
-
-/*
- * I2O Interface Objects
- */
-
-#include <linux/wait.h>
-typedef wait_queue_head_t adpt_wait_queue_head_t;
-#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_entry_t adpt_wait_queue_entry_t;
-
-/*
- * message structures
- */
-
-struct i2o_message
-{
- u8 version_offset;
- u8 flags;
- u16 size;
- u32 target_tid:12;
- u32 init_tid:12;
- u32 function:8;
- u32 initiator_context;
- /* List follows */
-};
-
-struct adpt_device;
-struct _adpt_hba;
-struct i2o_device
-{
- struct i2o_device *next; /* Chain */
- struct i2o_device *prev;
-
- char dev_name[8]; /* linux /dev name if available */
- i2o_lct_entry lct_data;/* Device LCT information */
- u32 flags;
- struct proc_dir_entry* proc_entry; /* /proc dir */
- struct adpt_device *owner;
- struct _adpt_hba *controller; /* Controlling IOP */
-};
-
-/*
- * Each I2O controller has one of these objects
- */
-
-struct i2o_controller
-{
- char name[16];
- int unit;
- int type;
- int enabled;
-
- struct notifier_block *event_notifer; /* Events */
- atomic_t users;
- struct i2o_device *devices; /* I2O device chain */
- struct i2o_controller *next; /* Controller chain */
-
-};
-
-/*
- * I2O System table entry
- */
-struct i2o_sys_tbl_entry
-{
- u16 org_id;
- u16 reserved1;
- u32 iop_id:12;
- u32 reserved2:20;
- u16 seg_num:12;
- u16 i2o_version:4;
- u8 iop_state;
- u8 msg_type;
- u16 frame_size;
- u16 reserved3;
- u32 last_changed;
- u32 iop_capabilities;
- u32 inbound_low;
- u32 inbound_high;
-};
-
-struct i2o_sys_tbl
-{
- u8 num_entries;
- u8 version;
- u16 reserved1;
- u32 change_ind;
- u32 reserved2;
- u32 reserved3;
- struct i2o_sys_tbl_entry iops[];
-};
-
-/*
- * I2O classes / subclasses
- */
-
-/* Class ID and Code Assignments
- * (LCT.ClassID.Version field)
- */
-#define I2O_CLASS_VERSION_10 0x00
-#define I2O_CLASS_VERSION_11 0x01
-
-/* Class code names
- * (from v1.5 Table 6-1 Class Code Assignments.)
- */
-
-#define I2O_CLASS_EXECUTIVE 0x000
-#define I2O_CLASS_DDM 0x001
-#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
-#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
-#define I2O_CLASS_LAN 0x020
-#define I2O_CLASS_WAN 0x030
-#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
-#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
-#define I2O_CLASS_SCSI_PERIPHERAL 0x051
-#define I2O_CLASS_ATE_PORT 0x060
-#define I2O_CLASS_ATE_PERIPHERAL 0x061
-#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
-#define I2O_CLASS_FLOPPY_DEVICE 0x071
-#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
-#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
-#define I2O_CLASS_PEER_TRANSPORT 0x091
-
-/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
- */
-
-#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
-
-/* Subclasses
- */
-
-#define I2O_SUBCLASS_i960 0x001
-#define I2O_SUBCLASS_HDM 0x020
-#define I2O_SUBCLASS_ISM 0x021
-
-/* Operation functions */
-
-#define I2O_PARAMS_FIELD_GET 0x0001
-#define I2O_PARAMS_LIST_GET 0x0002
-#define I2O_PARAMS_MORE_GET 0x0003
-#define I2O_PARAMS_SIZE_GET 0x0004
-#define I2O_PARAMS_TABLE_GET 0x0005
-#define I2O_PARAMS_FIELD_SET 0x0006
-#define I2O_PARAMS_LIST_SET 0x0007
-#define I2O_PARAMS_ROW_ADD 0x0008
-#define I2O_PARAMS_ROW_DELETE 0x0009
-#define I2O_PARAMS_TABLE_CLEAR 0x000A
-
-/*
- * I2O serial number conventions / formats
- * (circa v1.5)
- */
-
-#define I2O_SNFORMAT_UNKNOWN 0
-#define I2O_SNFORMAT_BINARY 1
-#define I2O_SNFORMAT_ASCII 2
-#define I2O_SNFORMAT_UNICODE 3
-#define I2O_SNFORMAT_LAN48_MAC 4
-#define I2O_SNFORMAT_WAN 5
-
-/* Plus new in v2.0 (Yellowstone pdf doc)
- */
-
-#define I2O_SNFORMAT_LAN64_MAC 6
-#define I2O_SNFORMAT_DDM 7
-#define I2O_SNFORMAT_IEEE_REG64 8
-#define I2O_SNFORMAT_IEEE_REG128 9
-#define I2O_SNFORMAT_UNKNOWN2 0xff
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
-/*
- * Messaging API values
- */
-
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-
-#define I2O_PRIVATE_MSG 0xFF
-
-/*
- * Init Outbound Q status
- */
-
-#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
-#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
-#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
-#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
-
-/*
- * I2O Get Status State values
- */
-
-#define ADAPTER_STATE_INITIALIZING 0x01
-#define ADAPTER_STATE_RESET 0x02
-#define ADAPTER_STATE_HOLD 0x04
-#define ADAPTER_STATE_READY 0x05
-#define ADAPTER_STATE_OPERATIONAL 0x08
-#define ADAPTER_STATE_FAILED 0x10
-#define ADAPTER_STATE_FAULTED 0x11
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
-#define I2OVERSION I2OVER15
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-
-#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
-#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_LAST 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-
-/* Special TID Assignments */
-
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-#define MSG_FRAME_SIZE 128
-#define NMBR_MSG_FRAMES 128
-
-#define MSG_POOL_SIZE 16384
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _SCSI_I2O_H */
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
deleted file mode 100644
index 25e9251f8c78..000000000000
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti_ioctl.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-/***************************************************************************
- * This file is generated from osd_unix.h *
- * *************************************************************************/
-
-#ifndef _dpti_ioctl_h
-#define _dpti_ioctl_h
-
-// IOCTL interface commands
-
-#ifndef _IOWR
-# define _IOWR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOW
-# define _IOW(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOR
-# define _IOR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IO
-# define _IO(x,y) (((x)<<8)|y)
-#endif
-/* EATA PassThrough Command */
-#define EATAUSRCMD _IOWR('D',65,EATA_CP)
-/* Set Debug Level If Enabled */
-#define DPT_DEBUG _IOW('D',66,int)
-/* Get Signature Structure */
-#define DPT_SIGNATURE _IOR('D',67,dpt_sig_S)
-#if defined __bsdi__
-#define DPT_SIGNATURE_PACKED _IOR('D',67,dpt_sig_S_Packed)
-#endif
-/* Get Number Of DPT Adapters */
-#define DPT_NUMCTRLS _IOR('D',68,int)
-/* Get Adapter Info Structure */
-#define DPT_CTRLINFO _IOR('D',69,CtrlInfo)
-/* Get Statistics If Enabled */
-#define DPT_STATINFO _IO('D',70)
-/* Clear Stats If Enabled */
-#define DPT_CLRSTAT _IO('D',71)
-/* Get System Info Structure */
-#define DPT_SYSINFO _IOR('D',72,sysInfo_S)
-/* Set Timeout Value */
-#define DPT_TIMEOUT _IO('D',73)
-/* Get config Data */
-#define DPT_CONFIG _IO('D',74)
-/* Get Blink LED Code */
-#define DPT_BLINKLED _IOR('D',75,int)
-/* Get Statistical information (if available) */
-#define DPT_STATS_INFO _IOR('D',80,STATS_DATA)
-/* Clear the statistical information */
-#define DPT_STATS_CLEAR _IO('D',81)
-/* Get Performance metrics */
-#define DPT_PERF_INFO _IOR('D',82,dpt_perf_t)
-/* Send an I2O command */
-#define I2OUSRCMD _IO('D',76)
-/* Inform driver to re-acquire LCT information */
-#define I2ORESCANCMD _IO('D',77)
-/* Inform driver to reset adapter */
-#define I2ORESETCMD _IO('D',78)
-/* See if the target is mounted */
-#define DPT_TARGET_BUSY _IOR('D',79, TARGET_BUSY_T)
-
-
- /* Structure Returned From Get Controller Info */
-
-typedef struct {
- uCHAR state; /* Operational state */
- uCHAR id; /* Host adapter SCSI id */
- int vect; /* Interrupt vector number */
- int base; /* Base I/O address */
- int njobs; /* # of jobs sent to HA */
- int qdepth; /* Controller queue depth. */
- int wakebase; /* mpx wakeup base index. */
- uINT SGsize; /* Scatter/Gather list size. */
- unsigned heads; /* heads for drives on cntlr. */
- unsigned sectors; /* sectors for drives on cntlr. */
- uCHAR do_drive32; /* Flag for Above 16 MB Ability */
- uCHAR BusQuiet; /* SCSI Bus Quiet Flag */
- char idPAL[4]; /* 4 Bytes Of The ID Pal */
- uCHAR primary; /* 1 For Primary, 0 For Secondary */
- uCHAR eataVersion; /* EATA Version */
- uINT cpLength; /* EATA Command Packet Length */
- uINT spLength; /* EATA Status Packet Length */
- uCHAR drqNum; /* DRQ Index (0,5,6,7) */
- uCHAR flag1; /* EATA Flags 1 (Byte 9) */
- uCHAR flag2; /* EATA Flags 2 (Byte 30) */
-} CtrlInfo;
-
-typedef struct {
- uSHORT length; // Remaining length of this
- uSHORT drvrHBAnum; // Relative HBA # used by the driver
- uINT baseAddr; // Base I/O address
- uSHORT blinkState; // Blink LED state (0=Not in blink LED)
- uCHAR pciBusNum; // PCI Bus # (Optional)
- uCHAR pciDeviceNum; // PCI Device # (Optional)
- uSHORT hbaFlags; // Miscellaneous HBA flags
- uSHORT Interrupt; // Interrupt set for this device.
-# if (defined(_DPT_ARC))
- uINT baseLength;
- ADAPTER_OBJECT *AdapterObject;
- LARGE_INTEGER DmaLogicalAddress;
- PVOID DmaVirtualAddress;
- LARGE_INTEGER ReplyLogicalAddress;
- PVOID ReplyVirtualAddress;
-# else
- uINT reserved1; // Reserved for future expansion
- uINT reserved2; // Reserved for future expansion
- uINT reserved3; // Reserved for future expansion
-# endif
-} drvrHBAinfo_S;
-
-typedef struct TARGET_BUSY
-{
- uLONG channel;
- uLONG id;
- uLONG lun;
- uLONG isBusy;
-} TARGET_BUSY_T;
-
-#endif
-
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
deleted file mode 100644
index a6644b332b53..000000000000
--- a/drivers/scsi/dpt/dptsig.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __DPTSIG_H_
-#define __DPTSIG_H_
-#ifdef _SINIX_ADDON
-#include "dpt.h"
-#endif
-/* DPT SIGNATURE SPEC AND HEADER FILE */
-/* Signature Version 1 (sorry no 'A') */
-
-/* to make sure we are talking the same size under all OS's */
-typedef unsigned char sigBYTE;
-typedef unsigned short sigWORD;
-typedef unsigned int sigINT;
-
-/*
- * use sigWORDLittleEndian for:
- * dsCapabilities
- * dsDeviceSupp
- * dsAdapterSupp
- * dsApplication
- * use sigLONGLittleEndian for:
- * dsOS
- * so that the sig can be standardised to Little Endian
- */
-#if (defined(_DPT_BIG_ENDIAN))
-# define sigWORDLittleEndian(x) ((((x)&0xFF)<<8)|(((x)>>8)&0xFF))
-# define sigLONGLittleEndian(x) \
- ((((x)&0xFF)<<24) | \
- (((x)&0xFF00)<<8) | \
- (((x)&0xFF0000L)>>8) | \
- (((x)&0xFF000000L)>>24))
-#else
-# define sigWORDLittleEndian(x) (x)
-# define sigLONGLittleEndian(x) (x)
-#endif
-
-/* must make sure the structure is not word or double-word aligned */
-/* --------------------------------------------------------------- */
-/* Borland will ignore the following pragma: */
-/* Word alignment is OFF by default. If in the, IDE make */
-/* sure that Options | Compiler | Code Generation | Word Alignment */
-/* is not checked. If using BCC, do not use the -a option. */
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=mac68k
-#endif
-
-
-/* Current Signature Version - sigBYTE dsSigVersion; */
-/* ------------------------------------------------------------------ */
-#define SIG_VERSION 1
-
-/* Processor Family - sigBYTE dsProcessorFamily; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-/* What type of processor the file is meant to run on. */
-/* This will let us know whether to read sigWORDs as high/low or low/high. */
-#define PROC_INTEL 0x00 /* Intel 80x86/ia64 */
-#define PROC_MOTOROLA 0x01 /* Motorola 68K */
-#define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */
-#define PROC_ALPHA 0x03 /* DEC Alpha */
-#define PROC_POWERPC 0x04 /* IBM Power PC */
-#define PROC_i960 0x05 /* Intel i960 */
-#define PROC_ULTRASPARC 0x06 /* SPARC processor */
-
-/* Specific Minimum Processor - sigBYTE dsProcessor; FLAG BITS */
-/* ------------------------------------------------------------------ */
-/* Different bit definitions dependent on processor_family */
-
-/* PROC_INTEL: */
-#define PROC_8086 0x01 /* Intel 8086 */
-#define PROC_286 0x02 /* Intel 80286 */
-#define PROC_386 0x04 /* Intel 80386 */
-#define PROC_486 0x08 /* Intel 80486 */
-#define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */
-#define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */
-#define PROC_IA64 0x40 /* Intel IA64 processor */
-
-/* PROC_i960: */
-#define PROC_960RX 0x01 /* Intel 80960RC/RD */
-#define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */
-
-/* PROC_MOTOROLA: */
-#define PROC_68000 0x01 /* Motorola 68000 */
-#define PROC_68010 0x02 /* Motorola 68010 */
-#define PROC_68020 0x04 /* Motorola 68020 */
-#define PROC_68030 0x08 /* Motorola 68030 */
-#define PROC_68040 0x10 /* Motorola 68040 */
-
-/* PROC_POWERPC */
-#define PROC_PPC601 0x01 /* PowerPC 601 */
-#define PROC_PPC603 0x02 /* PowerPC 603 */
-#define PROC_PPC604 0x04 /* PowerPC 604 */
-
-/* PROC_MIPS4000: */
-#define PROC_R4000 0x01 /* MIPS R4000 */
-
-/* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define FT_EXECUTABLE 0 /* Executable Program */
-#define FT_SCRIPT 1 /* Script/Batch File??? */
-#define FT_HBADRVR 2 /* HBA Driver */
-#define FT_OTHERDRVR 3 /* Other Driver */
-#define FT_IFS 4 /* Installable Filesystem Driver */
-#define FT_ENGINE 5 /* DPT Engine */
-#define FT_COMPDRVR 6 /* Compressed Driver Disk */
-#define FT_LANGUAGE 7 /* Foreign Language file */
-#define FT_FIRMWARE 8 /* Downloadable or actual Firmware */
-#define FT_COMMMODL 9 /* Communications Module */
-#define FT_INT13 10 /* INT 13 style HBA Driver */
-#define FT_HELPFILE 11 /* Help file */
-#define FT_LOGGER 12 /* Event Logger */
-#define FT_INSTALL 13 /* An Install Program */
-#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
-#define FT_RESOURCE 15 /* Storage Manager Resource File */
-#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
-
-/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define FTF_DLL 0x01 /* Dynamic Link Library */
-#define FTF_NLM 0x02 /* Netware Loadable Module */
-#define FTF_OVERLAYS 0x04 /* Uses overlays */
-#define FTF_DEBUG 0x08 /* Debug version */
-#define FTF_TSR 0x10 /* TSR */
-#define FTF_SYS 0x20 /* DOS Loadable driver */
-#define FTF_PROTECTED 0x40 /* Runs in protected mode */
-#define FTF_APP_SPEC 0x80 /* Application Specific */
-#define FTF_ROM (FTF_SYS|FTF_TSR) /* Special Case */
-
-/* OEM - sigBYTE dsOEM; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define OEM_DPT 0 /* DPT */
-#define OEM_ATT 1 /* ATT */
-#define OEM_NEC 2 /* NEC */
-#define OEM_ALPHA 3 /* Alphatronix */
-#define OEM_AST 4 /* AST */
-#define OEM_OLIVETTI 5 /* Olivetti */
-#define OEM_SNI 6 /* Siemens/Nixdorf */
-#define OEM_SUN 7 /* SUN Microsystems */
-
-/* Operating System - sigLONG dsOS; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define OS_DOS 0x00000001 /* PC/MS-DOS */
-#define OS_WINDOWS 0x00000002 /* Microsoft Windows 3.x */
-#define OS_WINDOWS_NT 0x00000004 /* Microsoft Windows NT */
-#define OS_OS2M 0x00000008 /* OS/2 1.2.x,MS 1.3.0,IBM 1.3.x - Monolithic */
-#define OS_OS2L 0x00000010 /* Microsoft OS/2 1.301 - LADDR */
-#define OS_OS22x 0x00000020 /* IBM OS/2 2.x */
-#define OS_NW286 0x00000040 /* Novell NetWare 286 */
-#define OS_NW386 0x00000080 /* Novell NetWare 386 */
-#define OS_GEN_UNIX 0x00000100 /* Generic Unix */
-#define OS_SCO_UNIX 0x00000200 /* SCO Unix */
-#define OS_ATT_UNIX 0x00000400 /* ATT Unix */
-#define OS_UNIXWARE 0x00000800 /* USL Unix */
-#define OS_INT_UNIX 0x00001000 /* Interactive Unix */
-#define OS_SOLARIS 0x00002000 /* SunSoft Solaris */
-#define OS_QNX 0x00004000 /* QNX for Tom Moch */
-#define OS_NEXTSTEP 0x00008000 /* NeXTSTEP/OPENSTEP/MACH */
-#define OS_BANYAN 0x00010000 /* Banyan Vines */
-#define OS_OLIVETTI_UNIX 0x00020000 /* Olivetti Unix */
-#define OS_MAC_OS 0x00040000 /* Mac OS */
-#define OS_WINDOWS_95 0x00080000 /* Microsoft Windows '95 */
-#define OS_NW4x 0x00100000 /* Novell Netware 4.x */
-#define OS_BSDI_UNIX 0x00200000 /* BSDi Unix BSD/OS 2.0 and up */
-#define OS_AIX_UNIX 0x00400000 /* AIX Unix */
-#define OS_FREE_BSD 0x00800000 /* FreeBSD Unix */
-#define OS_LINUX 0x01000000 /* Linux */
-#define OS_DGUX_UNIX 0x02000000 /* Data General Unix */
-#define OS_SINIX_N 0x04000000 /* SNI SINIX-N */
-#define OS_PLAN9 0x08000000 /* ATT Plan 9 */
-#define OS_TSX 0x10000000 /* SNH TSX-32 */
-
-#define OS_OTHER 0x80000000 /* Other */
-
-/* Capabilities - sigWORD dsCapabilities; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define CAP_RAID0 0x0001 /* RAID-0 */
-#define CAP_RAID1 0x0002 /* RAID-1 */
-#define CAP_RAID3 0x0004 /* RAID-3 */
-#define CAP_RAID5 0x0008 /* RAID-5 */
-#define CAP_SPAN 0x0010 /* Spanning */
-#define CAP_PASS 0x0020 /* Provides passthrough */
-#define CAP_OVERLAP 0x0040 /* Passthrough supports overlapped commands */
-#define CAP_ASPI 0x0080 /* Supports ASPI Command Requests */
-#define CAP_ABOVE16MB 0x0100 /* ISA Driver supports greater than 16MB */
-#define CAP_EXTEND 0x8000 /* Extended info appears after description */
-#ifdef SNI_MIPS
-#define CAP_CACHEMODE 0x1000 /* dpt_force_cache is set in driver */
-#endif
-
-/* Devices Supported - sigWORD dsDeviceSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define DEV_DASD 0x0001 /* DASD (hard drives) */
-#define DEV_TAPE 0x0002 /* Tape drives */
-#define DEV_PRINTER 0x0004 /* Printers */
-#define DEV_PROC 0x0008 /* Processors */
-#define DEV_WORM 0x0010 /* WORM drives */
-#define DEV_CDROM 0x0020 /* CD-ROM drives */
-#define DEV_SCANNER 0x0040 /* Scanners */
-#define DEV_OPTICAL 0x0080 /* Optical Drives */
-#define DEV_JUKEBOX 0x0100 /* Jukebox */
-#define DEV_COMM 0x0200 /* Communications Devices */
-#define DEV_OTHER 0x0400 /* Other Devices */
-#define DEV_ALL 0xFFFF /* All SCSI Devices */
-
-/* Adapters Families Supported - sigWORD dsAdapterSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define ADF_2001 0x0001 /* PM2001 */
-#define ADF_2012A 0x0002 /* PM2012A */
-#define ADF_PLUS_ISA 0x0004 /* PM2011,PM2021 */
-#define ADF_PLUS_EISA 0x0008 /* PM2012B,PM2022 */
-#define ADF_SC3_ISA 0x0010 /* PM2021 */
-#define ADF_SC3_EISA 0x0020 /* PM2022,PM2122, etc */
-#define ADF_SC3_PCI 0x0040 /* SmartCache III PCI */
-#define ADF_SC4_ISA 0x0080 /* SmartCache IV ISA */
-#define ADF_SC4_EISA 0x0100 /* SmartCache IV EISA */
-#define ADF_SC4_PCI 0x0200 /* SmartCache IV PCI */
-#define ADF_SC5_PCI 0x0400 /* Fifth Generation I2O products */
-/*
- * Combinations of products
- */
-#define ADF_ALL_2000 (ADF_2001|ADF_2012A)
-#define ADF_ALL_PLUS (ADF_PLUS_ISA|ADF_PLUS_EISA)
-#define ADF_ALL_SC3 (ADF_SC3_ISA|ADF_SC3_EISA|ADF_SC3_PCI)
-#define ADF_ALL_SC4 (ADF_SC4_ISA|ADF_SC4_EISA|ADF_SC4_PCI)
-#define ADF_ALL_SC5 (ADF_SC5_PCI)
-/* All EATA Caching Products */
-#define ADF_ALL_CACHE (ADF_ALL_PLUS|ADF_ALL_SC3|ADF_ALL_SC4)
-/* All EATA Bus Mastering Products */
-#define ADF_ALL_MASTER (ADF_2012A|ADF_ALL_CACHE)
-/* All EATA Adapter Products */
-#define ADF_ALL_EATA (ADF_2001|ADF_ALL_MASTER)
-#define ADF_ALL ADF_ALL_EATA
-
-/* Application - sigWORD dsApplication; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define APP_DPTMGR 0x0001 /* DPT Storage Manager */
-#define APP_ENGINE 0x0002 /* DPT Engine */
-#define APP_SYTOS 0x0004 /* Sytron Sytos Plus */
-#define APP_CHEYENNE 0x0008 /* Cheyenne ARCServe + ARCSolo */
-#define APP_MSCDEX 0x0010 /* Microsoft CD-ROM extensions */
-#define APP_NOVABACK 0x0020 /* NovaStor Novaback */
-#define APP_AIM 0x0040 /* Archive Information Manager */
-
-/* Requirements - sigBYTE dsRequirements; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define REQ_SMARTROM 0x01 /* Requires SmartROM to be present */
-#define REQ_DPTDDL 0x02 /* Requires DPTDDL.SYS to be loaded */
-#define REQ_HBA_DRIVER 0x04 /* Requires an HBA driver to be loaded */
-#define REQ_ASPI_TRAN 0x08 /* Requires an ASPI Transport Module */
-#define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */
-#define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */
-
-/*
- * You may adjust dsDescription_size with an override to a value less than
- * 50 so that the structure allocates less real space.
- */
-#if (!defined(dsDescription_size))
-# define dsDescription_size 50
-#endif
-
-typedef struct dpt_sig {
- char dsSignature[6]; /* ALWAYS "dPtSiG" */
- sigBYTE dsSigVersion; /* signature version (currently 1) */
- sigBYTE dsProcessorFamily; /* what type of processor */
- sigBYTE dsProcessor; /* precise processor */
- sigBYTE dsFiletype; /* type of file */
- sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
- sigBYTE dsOEM; /* OEM file was created for */
- sigINT dsOS; /* which Operating systems */
- sigWORD dsCapabilities; /* RAID levels, etc. */
- sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
- sigWORD dsAdapterSupp; /* DPT adapter families supported */
- sigWORD dsApplication; /* applications file is for */
- sigBYTE dsRequirements; /* Other driver dependencies */
- sigBYTE dsVersion; /* 1 */
- sigBYTE dsRevision; /* 'J' */
- sigBYTE dsSubRevision; /* '9' ' ' if N/A */
- sigBYTE dsMonth; /* creation month */
- sigBYTE dsDay; /* creation day */
- sigBYTE dsYear; /* creation year since 1980 (1993=13) */
- /* description (NULL terminated) */
- char dsDescription[dsDescription_size];
-} dpt_sig_S;
-/* 32 bytes minimum - with no description. Put NULL at description[0] */
-/* 81 bytes maximum - with 49 character description plus NULL. */
-
-/* This line added at Roycroft's request */
-/* Microsoft's NT compiler gets confused if you do a pack and don't */
-/* restore it. */
-
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=reset
-#endif
-
-#endif
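The sigWORDLittleEndian()/sigLONGLittleEndian() macros in the header above normalise multi-byte signature fields to little-endian: on _DPT_BIG_ENDIAN builds they swap bytes, otherwise they are the identity. A minimal user-space sketch of the same swaps (standalone names, not part of the deleted header):

#include <stdint.h>
#include <stdio.h>

/* 16-bit swap, as the _DPT_BIG_ENDIAN branch of sigWORDLittleEndian() does. */
static uint16_t sig_word_le(uint16_t x)
{
	return (uint16_t)(((x & 0xFF) << 8) | ((x >> 8) & 0xFF));
}

/* 32-bit swap, as the _DPT_BIG_ENDIAN branch of sigLONGLittleEndian() does. */
static uint32_t sig_long_le(uint32_t x)
{
	return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
	       ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
}

int main(void)
{
	printf("%04x -> %04x\n", 0x1234, sig_word_le(0x1234));           /* 1234 -> 3412 */
	printf("%08x -> %08x\n", 0xAABBCCDDu, sig_long_le(0xAABBCCDDu)); /* aabbccdd -> ddccbbaa */
	return 0;
}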
diff --git a/drivers/scsi/dpt/osd_defs.h b/drivers/scsi/dpt/osd_defs.h
deleted file mode 100644
index de3ae5722982..000000000000
--- a/drivers/scsi/dpt/osd_defs.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* BSDI osd_defs.h,v 1.4 1998/06/03 19:14:58 karels Exp */
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef _OSD_DEFS_H
-#define _OSD_DEFS_H
-
-/*File - OSD_DEFS.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains the OS dependent defines. This file is included
- *in osd_util.h and provides the OS specific defines for that file.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/31/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
- /* Define the operating system */
-#if (defined(__linux__))
-# define _DPT_LINUX
-#elif (defined(__bsdi__))
-# define _DPT_BSDI
-#elif (defined(__FreeBSD__))
-# define _DPT_FREE_BSD
-#else
-# define _DPT_SCO
-#endif
-
-#if defined (ZIL_CURSES)
-#define _DPT_CURSES
-#else
-#define _DPT_MOTIF
-#endif
-
- /* Redefine 'far' to nothing - no far pointer type required in UNIX */
-#define far
-
- /* Define the mutually exclusive semaphore type */
-#define SEMAPHORE_T unsigned int *
- /* Define a handle to a DLL */
-#define DLL_HANDLE_T unsigned int *
-
-#endif
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
deleted file mode 100644
index b2613c2eaac7..000000000000
--- a/drivers/scsi/dpt/osd_util.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __OSD_UTIL_H
-#define __OSD_UTIL_H
-
-/*File - OSD_UTIL.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains defines and function prototypes that are
- *operating system dependent. The resources defined in this file
- *are not specific to any particular application.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/7/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
-/*----------------------------- */
-/* Operating system selections: */
-/*----------------------------- */
-
-/*#define _DPT_MSDOS */
-/*#define _DPT_WIN_3X */
-/*#define _DPT_WIN_4X */
-/*#define _DPT_WIN_NT */
-/*#define _DPT_NETWARE */
-/*#define _DPT_OS2 */
-/*#define _DPT_SCO */
-/*#define _DPT_UNIXWARE */
-/*#define _DPT_SOLARIS */
-/*#define _DPT_NEXTSTEP */
-/*#define _DPT_BANYAN */
-
-/*-------------------------------- */
-/* Include the OS specific defines */
-/*-------------------------------- */
-
-/*#define OS_SELECTION From Above List */
-/*#define SEMAPHORE_T ??? */
-/*#define DLL_HANDLE_T ??? */
-
-#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
-# include "i386/isa/dpt_osd_defs.h"
-#else
-# include "osd_defs.h"
-#endif
-
-#ifndef DPT_UNALIGNED
- #define DPT_UNALIGNED
-#endif
-
-#ifndef DPT_EXPORT
- #define DPT_EXPORT
-#endif
-
-#ifndef DPT_IMPORT
- #define DPT_IMPORT
-#endif
-
-#ifndef DPT_RUNTIME_IMPORT
- #define DPT_RUNTIME_IMPORT DPT_IMPORT
-#endif
-
-/*--------------------- */
-/* OS dependent defines */
-/*--------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
- #define _DPT_16_BIT
-#else
- #define _DPT_32_BIT
-#endif
-
-#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
- #define _DPT_UNIX
-#endif
-
-#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
- || defined (_DPT_OS2)
- #define _DPT_DLL_SUPPORT
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
- #define _DPT_PREEMPTIVE
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
- #define _DPT_MULTI_THREADED
-#endif
-
-#if !defined (_DPT_MSDOS)
- #define _DPT_MULTI_TASKING
-#endif
-
- /* These exist for platforms that */
- /* choke when accessing mis-aligned */
- /* data */
-#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
- #if defined (_DPT_BIG_ENDIAN)
- #if !defined (_DPT_STRICT_ALIGN)
- #define _DPT_STRICT_ALIGN
- #endif
- #endif
-#endif
-
- /* Determine if in C or C++ mode */
-#ifdef __cplusplus
- #define _DPT_CPP
-#else
- #define _DPT_C
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Under Solaris the compiler refuses to accept code like: */
-/* { {"DPT"}, 0, NULL .... }, */
-/* and complains about the {"DPT"} part by saying "cannot use { } */
-/* to initialize char*". */
-/* */
-/* By defining these ugly macros we can get around this and also */
-/* not have to copy and #ifdef large sections of code. I know that */
-/* these macros are *really* ugly, but they should help reduce */
-/* maintenance in the long run. */
-/* */
-/*-------------------------------------------------------------------*/
-#if !defined (DPTSQO)
- #if defined (_DPT_SOLARIS)
- #define DPTSQO
- #define DPTSQC
- #else
- #define DPTSQO {
- #define DPTSQC }
- #endif /* solaris */
-#endif /* DPTSQO */
-
-
-/*---------------------- */
-/* OS dependent typedefs */
-/*---------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
- #define BYTE unsigned char
- #define WORD unsigned short
-#endif
-
-#ifndef _DPT_TYPEDEFS
- #define _DPT_TYPEDEFS
- typedef unsigned char uCHAR;
- typedef unsigned short uSHORT;
- typedef unsigned int uINT;
- typedef unsigned long uLONG;
-
- typedef union {
- uCHAR u8[4];
- uSHORT u16[2];
- uLONG u32;
- } access_U;
-#endif
-
-#if !defined (NULL)
- #define NULL 0
-#endif
-
-
-/*Prototypes - function ----------------------------------------------------- */
-
-#ifdef __cplusplus
- extern "C" { /* Declare all these functions as "C" functions */
-#endif
-
-/*------------------------ */
-/* Byte reversal functions */
-/*------------------------ */
-
- /* Reverses the byte ordering of a 2 byte variable */
-#if (!defined(osdSwap2))
- uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
-#endif // !osdSwap2
-
- /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
-#if (!defined(osdSwap3))
- uLONG osdSwap3(DPT_UNALIGNED uLONG *);
-#endif // !osdSwap3
-
-
-#ifdef _DPT_NETWARE
- #include "novpass.h" /* For DPT_Bswapl() prototype */
- /* Inline the byte swap */
- #ifdef __cplusplus
- inline uLONG osdSwap4(uLONG *inLong) {
- return *inLong = DPT_Bswapl(*inLong);
- }
- #else
- #define osdSwap4(inLong) DPT_Bswapl(inLong)
- #endif // cplusplus
-#else
- /* Reverses the byte ordering of a 4 byte variable */
-# if (!defined(osdSwap4))
- uLONG osdSwap4(DPT_UNALIGNED uLONG *);
-# endif // !osdSwap4
-
- /* The following functions ALWAYS swap regardless of the *
- * presence of DPT_BIG_ENDIAN */
-
- uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
- uLONG trueSwap4(DPT_UNALIGNED uLONG *);
-
-#endif // netware
-
-
-/*-------------------------------------*
- * Network order swap functions *
- * *
- * These functions/macros will be used *
- * by the structure insert()/extract() *
- * functions. *
- *
- * We will enclose all structure *
- * portability modifications inside *
- * #ifdefs. When we are ready, we *
- * will #define DPT_PORTABLE to begin *
- * using the modifications. *
- *-------------------------------------*/
-uLONG netSwap4(uLONG val);
-
-#if defined (_DPT_BIG_ENDIAN)
-
-// for big-endian we need to swap
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) netSwap4((x))
-#endif // NET_SWAP_4
-
-#else
-
-// for little-endian we don't need to do anything
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (x)
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) (x)
-#endif // NET_SWAP_4
-
-#endif // big endian
-
-
-
-/*----------------------------------- */
-/* Run-time loadable module functions */
-/*----------------------------------- */
-
- /* Loads the specified run-time loadable DLL */
-DLL_HANDLE_T osdLoadModule(uCHAR *);
- /* Unloads the specified run-time loadable DLL */
-uSHORT osdUnloadModule(DLL_HANDLE_T);
- /* Returns a pointer to a function inside a run-time loadable DLL */
-void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
-
-/*--------------------------------------- */
-/* Mutually exclusive semaphore functions */
-/*--------------------------------------- */
-
- /* Create a named semaphore */
-SEMAPHORE_T osdCreateNamedSemaphore(char *);
- /* Create a mutually exclusive semaphore */
-SEMAPHORE_T osdCreateSemaphore(void);
- /* create an event semaphore */
-SEMAPHORE_T osdCreateEventSemaphore(void);
- /* create a named event semaphore */
-SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
-
- /* Destroy the specified mutually exclusive semaphore object */
-uSHORT osdDestroySemaphore(SEMAPHORE_T);
- /* Request access to the specified mutually exclusive semaphore */
-uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
- /* Release access to the specified mutually exclusive semaphore */
-uSHORT osdReleaseSemaphore(SEMAPHORE_T);
- /* wait for an event to happen */
-uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
- /* signal an event */
-uLONG osdSignalEventSemaphore(SEMAPHORE_T);
- /* reset the event */
-uLONG osdResetEventSemaphore(SEMAPHORE_T);
-
-/*----------------- */
-/* Thread functions */
-/*----------------- */
-
- /* Releases control to the task switcher in non-preemptive */
- /* multitasking operating systems. */
-void osdSwitchThreads(void);
-
- /* Starts a thread function */
-uLONG osdStartThread(void *,void *);
-
-/* what is my thread id */
-uLONG osdGetThreadID(void);
-
-/* wakes up the specified thread */
-void osdWakeThread(uLONG);
-
-/* osd sleep for x milliseconds */
-void osdSleep(uLONG);
-
-#define DPT_THREAD_PRIORITY_LOWEST 0x00
-#define DPT_THREAD_PRIORITY_NORMAL 0x01
-#define DPT_THREAD_PRIORITY_HIGHEST 0x02
-
-uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
-
-#ifdef __cplusplus
- } /* end the extern "C" declaration */
-#endif
-
-#endif /* osd_util_h */
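NET_SWAP_2()/NET_SWAP_4() above expand to byte swaps only on _DPT_BIG_ENDIAN builds, so values pushed through them always end up little-endian on the wire. A sketch of the netSwap4() helper the header declares (illustrative only; the real body lived in the OS-dependent layer):

#include <stdint.h>

/* Unconditionally reverse the four bytes of val, which is what the
 * big-endian NET_SWAP_4() branch expects netSwap4() to do. */
uint32_t netSwap4(uint32_t val)
{
	return ((val & 0x000000FFu) << 24) | ((val & 0x0000FF00u) << 8) |
	       ((val & 0x00FF0000u) >> 8)  | ((val & 0xFF000000u) >> 24);
}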
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
deleted file mode 100644
index a4aa1c31ff72..000000000000
--- a/drivers/scsi/dpt/sys_info.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/* BSDI sys_info.h,v 1.6 1998/06/03 19:14:59 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __SYS_INFO_H
-#define __SYS_INFO_H
-
-/*File - SYS_INFO.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains structure definitions for the OS dependent
- *layer system information buffers.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Don Kemper
- *Date: 5/10/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Include Files ------------------------------------------------------------- */
-
-#include "osd_util.h"
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif // no unpack
-
-
-/*struct - driveParam_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the drive parameters seen during
- *booting.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct driveParam_S {
-#else
- typedef struct {
-#endif
-
- uSHORT cylinders; /* Up to 1024 */
- uCHAR heads; /* Up to 255 */
- uCHAR sectors; /* Up to 63 */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } driveParam_S;
-#endif
-/*driveParam_S - end */
-
-
-/*struct - sysInfo_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the command system information that
- *should be returned by every OS dependent layer.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define SI_CMOS_Valid 0x0001
-#define SI_NumDrivesValid 0x0002
-#define SI_ProcessorValid 0x0004
-#define SI_MemorySizeValid 0x0008
-#define SI_DriveParamsValid 0x0010
-#define SI_SmartROMverValid 0x0020
-#define SI_OSversionValid 0x0040
-#define SI_OSspecificValid 0x0080 /* 1 if OS structure returned */
-#define SI_BusTypeValid 0x0100
-
-#define SI_ALL_VALID 0x0FFF /* All Std SysInfo is valid */
-#define SI_NO_SmartROM 0x8000
-
-/*busType - definitions */
-#define SI_ISA_BUS 0x00
-#define SI_MCA_BUS 0x01
-#define SI_EISA_BUS 0x02
-#define SI_PCI_BUS 0x04
-
-#ifdef __cplusplus
- struct sysInfo_S {
-#else
- typedef struct {
-#endif
-
- uCHAR drive0CMOS; /* CMOS Drive 0 Type */
- uCHAR drive1CMOS; /* CMOS Drive 1 Type */
- uCHAR numDrives; /* 0040:0075 contents */
- uCHAR processorFamily; /* Same as DPTSIG's definition */
- uCHAR processorType; /* Same as DPTSIG's definition */
- uCHAR smartROMMajorVersion;
- uCHAR smartROMMinorVersion; /* SmartROM version */
- uCHAR smartROMRevision;
- uSHORT flags; /* See bit definitions above */
- uSHORT conventionalMemSize; /* in KB */
- uINT extendedMemSize; /* in KB */
- uINT osType; /* Same as DPTSIG's definition */
- uCHAR osMajorVersion;
- uCHAR osMinorVersion; /* The OS version */
- uCHAR osRevision;
-#ifdef _SINIX_ADDON
- uCHAR busType; /* See definitions above */
- uSHORT osSubRevision;
- uCHAR pad[2]; /* For alignment */
-#else
- uCHAR osSubRevision;
- uCHAR busType; /* See definitions above */
- uCHAR pad[3]; /* For alignment */
-#endif
- driveParam_S drives[16]; /* SmartROM Logical Drives */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } sysInfo_S;
-#endif
-/*sysInfo_S - end */
-
-
-/*struct - DOS_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *DOS workstation.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define DI_DOS_HIGH 0x01 /* DOS is loaded high */
-#define DI_DPMI_VALID 0x02 /* DPMI version is valid */
-
-#ifdef __cplusplus
- struct DOS_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR flags; /* See bit definitions above */
- uSHORT driverLocation; /* SmartROM BIOS address */
- uSHORT DOS_version;
- uSHORT DPMI_version;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } DOS_Info_S;
-#endif
-/*DOS_Info_S - end */
-
-
-/*struct - Netware_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Netware machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct Netware_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR driverName[13]; /* ie PM12NW31.DSK */
- uCHAR serverName[48];
- uCHAR netwareVersion; /* The Netware OS version */
- uCHAR netwareSubVersion;
- uCHAR netwareRevision;
- uSHORT maxConnections; /* Probably 250 or 1000 */
- uSHORT connectionsInUse;
- uSHORT maxVolumes;
- uCHAR unused;
- uCHAR SFTlevel;
- uCHAR TTSlevel;
-
- uCHAR clibMajorVersion; /* The CLIB.NLM version */
- uCHAR clibMinorVersion;
- uCHAR clibRevision;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } Netware_Info_S;
-#endif
-/*Netware_Info_S - end */
-
-
-/*struct - OS2_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *OS/2 machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct OS2_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } OS2_Info_S;
-#endif
-/*OS2_Info_S - end */
-
-
-/*struct - WinNT_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Windows NT machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct WinNT_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } WinNT_Info_S;
-#endif
-/*WinNT_Info_S - end */
-
-
-/*struct - SCO_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *SCO UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct SCO_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } SCO_Info_S;
-#endif
-/*SCO_Info_S - end */
-
-
-/*struct - USL_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *USL UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct USL_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } USL_Info_S;
-#endif
-/*USL_Info_S - end */
-
-
- /* Restore default structure packing */
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif // no unpack
-
-#endif // __SYS_INFO_H
-
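The #pragma pack(1) wrapper above matters because sysInfo_S mixes uCHAR, uSHORT and uINT fields that a compiler would otherwise pad to natural alignment, changing the buffer layout the management tools expect. A small, self-contained illustration of the effect (hypothetical struct, not from the header):

#include <stdio.h>

struct padded { unsigned char a; unsigned short b; unsigned int c; };

#pragma pack(1)
struct packed { unsigned char a; unsigned short b; unsigned int c; };
#pragma pack()

int main(void)
{
	/* Typically prints 8 and 7: the padding byte after 'a'
	 * disappears once the struct is packed. */
	printf("%zu %zu\n", sizeof(struct padded), sizeof(struct packed));
	return 0;
}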
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
deleted file mode 100644
index 2e9155ba7408..000000000000
--- a/drivers/scsi/dpt_i2o.c
+++ /dev/null
@@ -1,3545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/***************************************************************************
- dpti.c - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2000 by Adaptec
-
- July 30, 2001 First version being submitted
- for inclusion in the kernel. V2.4
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-/***************************************************************************
- * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
- - Support 2.6 kernel and DMA-mapping
- - ioctl fix for raid tools
- - use schedule_timeout in long long loop
- **************************************************************************/
-
-/*#define DEBUG 1 */
-/*#define UARTDELAY 1 */
-
-#include <linux/module.h>
-#include <linux/pgtable.h>
-
-MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
-MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
-
-////////////////////////////////////////////////////////////////
-
-#include <linux/ioctl.h> /* For SCSI-Passthrough */
-#include <linux/uaccess.h>
-
-#include <linux/stat.h>
-#include <linux/slab.h> /* for kmalloc() */
-#include <linux/pci.h> /* for PCI support */
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h> /* for udelay */
-#include <linux/interrupt.h>
-#include <linux/kernel.h> /* for printk */
-#include <linux/sched.h>
-#include <linux/reboot.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/mutex.h>
-
-#include <asm/processor.h> /* for boot_cpu_data */
-#include <asm/io.h> /* for virt_to_bus, etc. */
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-
-#include "dpt/dptsig.h"
-#include "dpti.h"
-
-/*============================================================================
- * Create a binary signature - this is read by dptsig
- * Needed for our management apps
- *============================================================================
- */
-static DEFINE_MUTEX(adpt_mutex);
-static dpt_sig_S DPTI_sig = {
- {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
-#ifdef __i386__
- PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
-#elif defined(__ia64__)
- PROC_INTEL, PROC_IA64,
-#elif defined(__sparc__)
- PROC_ULTRASPARC, PROC_ULTRASPARC,
-#elif defined(__alpha__)
- PROC_ALPHA, PROC_ALPHA,
-#else
- (-1),(-1),
-#endif
- FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
- ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
- DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
-};
-
-
-
-
-/*============================================================================
- * Globals
- *============================================================================
- */
-
-static DEFINE_MUTEX(adpt_configuration_lock);
-
-static struct i2o_sys_tbl *sys_tbl;
-static dma_addr_t sys_tbl_pa;
-static int sys_tbl_ind;
-static int sys_tbl_len;
-
-static adpt_hba* hba_chain = NULL;
-static int hba_count = 0;
-
-static struct class *adpt_sysfs_class;
-
-static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
-#endif
-
-static const struct file_operations adpt_fops = {
- .unlocked_ioctl = adpt_unlocked_ioctl,
- .open = adpt_open,
- .release = adpt_close,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_adpt_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-/* Structures and definitions for synchronous message posting.
- * See adpt_i2o_post_wait() for description
- * */
-struct adpt_i2o_post_wait_data
-{
- int status;
- u32 id;
- adpt_wait_queue_head_t *wq;
- struct adpt_i2o_post_wait_data *next;
-};
-
-static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
-static u32 adpt_post_wait_id = 0;
-static DEFINE_SPINLOCK(adpt_post_wait_lock);
-
-
-/*============================================================================
- * Functions
- *============================================================================
- */
-
-static inline int dpt_dma64(adpt_hba *pHba)
-{
- return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
-}
-
-static inline u32 dma_high(dma_addr_t addr)
-{
- return upper_32_bits(addr);
-}
-
-static inline u32 dma_low(dma_addr_t addr)
-{
- return (u32)addr;
-}
-
-static u8 adpt_read_blink_led(adpt_hba* host)
-{
- if (host->FwDebugBLEDflag_P) {
- if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
- return readb(host->FwDebugBLEDvalue_P);
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Scsi host template interface functions
- *============================================================================
- */
-
-#ifdef MODULE
-static struct pci_device_id dptids[] = {
- { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { 0, }
-};
-#endif
-
-MODULE_DEVICE_TABLE(pci,dptids);
-
-static int adpt_detect(struct scsi_host_template* sht)
-{
- struct pci_dev *pDev = NULL;
- adpt_hba *pHba;
- adpt_hba *next;
-
- PINFO("Detecting Adaptec I2O RAID controllers...\n");
-
- /* search for all Adaptec I2O RAID cards */
- while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
- if(pDev->device == PCI_DPT_DEVICE_ID ||
- pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
- if(adpt_install_hba(sht, pDev) ){
- PERROR("Could not Init an I2O RAID device\n");
- PERROR("Will not try to detect others.\n");
- return hba_count-1;
- }
- pci_dev_get(pDev);
- }
- }
-
- /* In INIT state, Activate IOPs */
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- // Activate does get status, init outbound, and get hrt
- if (adpt_i2o_activate_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- }
- }
-
-
- /* Active IOPs in HOLD state */
-
-rebuild_sys_tab:
- if (hba_chain == NULL)
- return 0;
-
- /*
- * If build_sys_table fails, we kill everything and bail
- * as we can't init the IOPs w/o a system table
- */
- if (adpt_i2o_build_sys_table() < 0) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
-
- PDEBUG("HBA's in HOLD state\n");
-
- /* If an IOP doesn't come online, we need to rebuild the System table */
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (adpt_i2o_online_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- goto rebuild_sys_tab;
- }
- }
-
- /* Active IOPs now in OPERATIONAL state */
- PDEBUG("HBA's in OPERATIONAL state\n");
-
- printk("dpti: If you have a lot of devices this could take a few minutes.\n");
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
- if (adpt_i2o_lct_get(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
-
- if (adpt_i2o_parse_lct(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- adpt_inquiry(pHba);
- }
-
- adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
- if (IS_ERR(adpt_sysfs_class)) {
- printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
- adpt_sysfs_class = NULL;
- }
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- if (adpt_scsi_host_alloc(pHba, sht) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- pHba->initialized = TRUE;
- pHba->state &= ~DPTI_STATE_RESET;
- if (adpt_sysfs_class) {
- struct device *dev = device_create(adpt_sysfs_class,
- NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
- "dpti%d", pHba->unit);
- if (IS_ERR(dev)) {
- printk(KERN_WARNING"dpti%d: unable to "
- "create device in dpt_i2o class\n",
- pHba->unit);
- }
- }
- }
-
- // Register our control device node
- // nodes will need to be created in /dev to access this
- // the nodes cannot be created from within the driver
- if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
- return hba_count;
-}
-
-
-static void adpt_release(adpt_hba *pHba)
-{
- struct Scsi_Host *shost = pHba->host;
-
- scsi_remove_host(shost);
-// adpt_i2o_quiesce_hba(pHba);
- adpt_i2o_delete_hba(pHba);
- scsi_host_put(shost);
-}
-
-
-static void adpt_inquiry(adpt_hba* pHba)
-{
- u32 msg[17];
- u32 *mptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- u32 len;
- u32 reqlen;
- u8* buf;
- dma_addr_t addr;
- u8 scb[16];
- s32 rcode;
-
- memset(msg, 0, sizeof(msg));
- buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
- if(!buf){
- printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
- return;
- }
- memset((void*)buf, 0, 36);
-
- len = 36;
- direction = 0x00000000;
- scsidir =0x40000000; // DATA IN (iop<--dev)
-
- if (dpt_dma64(pHba))
- reqlen = 17; // SINGLE SGE, 64 bit
- else
- reqlen = 14; // SINGLE SGE, 32 bit
- /* Stick the headers on */
- msg[0] = reqlen<<16 | SGL_OFFSET_12;
- msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
- msg[2] = 0;
- msg[3] = 0;
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
- msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
-
- mptr=msg+7;
-
- memset(scb, 0, sizeof(scb));
- // Write SCSI command into the message - always 16 byte block
- scb[0] = INQUIRY;
- scb[1] = 0;
- scb[2] = 0;
- scb[3] = 0;
- scb[4] = 36;
- scb[5] = 0;
- // Don't care about the rest of scb
-
- memcpy(mptr, scb, sizeof(scb));
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
-
- /* Now fill in the SGList and command */
- *lenptr = len;
- if (dpt_dma64(pHba)) {
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = dma_low(addr);
- *mptr++ = dma_high(addr);
- } else {
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = addr;
- }
-
- // Send it on its way
- rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
- if (rcode != 0) {
- sprintf(pHba->detail, "Adaptec I2O RAID");
- printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
- if (rcode != -ETIME && rcode != -EINTR)
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- } else {
- memset(pHba->detail, 0, sizeof(pHba->detail));
- memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
- memcpy(&(pHba->detail[16]), " Model: ", 8);
- memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
- memcpy(&(pHba->detail[40]), " FW: ", 4);
- memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
- pHba->detail[48] = '\0'; /* precautionary */
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- }
- adpt_i2o_status_get(pHba);
- return ;
-}
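adpt_inquiry() above builds a single scatter-gather element whose layout depends on dpt_dma64(): with 64-bit DMA the address is carried as separate low and high words. A sketch of just that address split, reusing the dma_low()/dma_high() idea from the code above (assumed layout; the real 64-bit path also emits an enable-64-bit element first):

#include <stdint.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

static uint32_t sge_low(dma_addr_t addr)  { return (uint32_t)addr; }
static uint32_t sge_high(dma_addr_t addr) { return (uint32_t)(addr >> 32); }

/* Append one last-element data SGE: flags|length, then the DMA address
 * as one word (32-bit) or two words, low then high (64-bit). */
static uint32_t *append_sge(uint32_t *mptr, dma_addr_t addr,
			    uint32_t len, int dma64)
{
	*mptr++ = 0xD0000000u | len;
	*mptr++ = sge_low(addr);
	if (dma64)
		*mptr++ = sge_high(addr);
	return mptr;
}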
-
-
-static int adpt_slave_configure(struct scsi_device * device)
-{
- struct Scsi_Host *host = device->host;
-
- if (host->can_queue && device->tagged_supported) {
- scsi_change_queue_depth(device,
- host->can_queue - 1);
- }
- return 0;
-}
-
-static int adpt_queue_lck(struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba = NULL;
- struct adpt_device* pDev = NULL; /* dpt per device information */
-
- /*
- * SCSI REQUEST_SENSE commands will be executed automatically by the
- * Host Adapter for any errors, so they should not be executed
- * explicitly unless the Sense Data is zero indicating that no error
- * occurred.
- */
-
- if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
- cmd->result = (DID_OK << 16);
- scsi_done(cmd);
- return 0;
- }
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- if (!pHba) {
- return FAILED;
- }
-
- rmb();
- if ((pHba->state) & DPTI_STATE_RESET)
- return SCSI_MLQUEUE_HOST_BUSY;
-
- // TODO if the cmd->device is offline then I may need to issue a bus rescan
- // followed by a get_lct to see if the device is there anymore
- if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
- /*
- * First command request for this device. Set up a pointer
- * to the device structure. This should be a TEST_UNIT_READY
- * command from scan_scsis_single.
- */
- if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
- // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
- // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
- cmd->result = (DID_NO_CONNECT << 16);
- scsi_done(cmd);
- return 0;
- }
- cmd->device->hostdata = pDev;
- }
- pDev->pScsi_dev = cmd->device;
-
- /*
- * If we are being called from when the device is being reset,
- * delay processing of the command until later.
- */
- if (pDev->state & DPTI_DEV_RESET ) {
- return FAILED;
- }
- return adpt_scsi_to_i2o(pHba, cmd, pDev);
-}
-
-static DEF_SCSI_QCMD(adpt_queue)
-
-static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int geom[])
-{
- int heads=-1;
- int sectors=-1;
- int cylinders=-1;
-
- // *** First let's set the default geometry ****
-
- // If the capacity is less than 0x2000
- if (capacity < 0x2000 ) { // floppy
- heads = 18;
- sectors = 2;
- }
- // else if between 0x2000 and 0x20000
- else if (capacity < 0x20000) {
- heads = 64;
- sectors = 32;
- }
- // else if between 0x20000 and 0x40000
- else if (capacity < 0x40000) {
- heads = 65;
- sectors = 63;
- }
- // else if between 0x40000 and 0x80000
- else if (capacity < 0x80000) {
- heads = 128;
- sectors = 63;
- }
- // else if greater than 0x80000
- else {
- heads = 255;
- sectors = 63;
- }
- cylinders = sector_div(capacity, heads * sectors);
-
- // Special case if CDROM
- if(sdev->type == 5) { // CDROM
- heads = 252;
- sectors = 63;
- cylinders = 1111;
- }
-
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
-
- PDEBUG("adpt_bios_param: exit\n");
- return 0;
-}
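adpt_bios_param() above picks a heads/sectors pair from fixed capacity bands and then derives the cylinder count. A user-space sketch of the same banding (plain 64-bit division stands in for the kernel's sector_div() helper; names are illustrative):

#include <stdint.h>

/* capacity is in 512-byte sectors; fills in a BIOS-style CHS guess. */
static void chs_guess(uint64_t capacity, int *heads, int *sectors,
		      uint64_t *cylinders)
{
	if (capacity < 0x2000)       { *heads = 18;  *sectors = 2;  } /* floppy */
	else if (capacity < 0x20000) { *heads = 64;  *sectors = 32; }
	else if (capacity < 0x40000) { *heads = 65;  *sectors = 63; }
	else if (capacity < 0x80000) { *heads = 128; *sectors = 63; }
	else                         { *heads = 255; *sectors = 63; }

	*cylinders = capacity / ((uint64_t)*heads * (uint64_t)*sectors);
}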
-
-
-static const char *adpt_info(struct Scsi_Host *host)
-{
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
- return (char *) (pHba->detail);
-}
-
-static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- struct adpt_device* d;
- int id;
- int chan;
- adpt_hba* pHba;
- int unit;
-
- // Find HBA (host bus adapter) we are looking for
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->host == host) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return 0;
- }
- host = pHba->host;
-
- seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
- seq_printf(m, "%s\n", pHba->detail);
- seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
- pHba->host->host_no, pHba->name, host->irq);
- seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
- host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
-
- seq_puts(m, "Devices:\n");
- for(chan = 0; chan < MAX_CHANNEL; chan++) {
- for(id = 0; id < MAX_ID; id++) {
- d = pHba->channel[chan].device[id];
- while(d) {
- seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
- seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
-
- unit = d->pI2o_dev->lct_data.tid;
- seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
- unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
- scsi_device_online(d->pScsi_dev)? "online":"offline");
- d = d->next_lun;
- }
- }
- }
- return 0;
-}
-
-/*
- * Turn a pointer to ioctl reply data into a u32 'context'
- */
-static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
-{
-#if BITS_PER_LONG == 32
- return (u32)(unsigned long)reply;
-#else
- ulong flags = 0;
- u32 nr, i;
-
- spin_lock_irqsave(pHba->host->host_lock, flags);
- nr = ARRAY_SIZE(pHba->ioctl_reply_context);
- for (i = 0; i < nr; i++) {
- if (pHba->ioctl_reply_context[i] == NULL) {
- pHba->ioctl_reply_context[i] = reply;
- break;
- }
- }
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- if (i >= nr) {
- printk(KERN_WARNING"%s: Too many outstanding "
- "ioctl commands\n", pHba->name);
- return (u32)-1;
- }
-
- return i;
-#endif
-}
-
-/*
- * Go from a u32 'context' to a pointer to ioctl reply data.
- */
-static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
-{
-#if BITS_PER_LONG == 32
- return (void *)(unsigned long)context;
-#else
- void *p = pHba->ioctl_reply_context[context];
- pHba->ioctl_reply_context[context] = NULL;
-
- return p;
-#endif
-}
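On 32-bit builds the two helpers above can carry the reply pointer directly in the 32-bit message context; on 64-bit builds they park it in a small per-HBA table and hand the firmware the slot index instead. A standalone sketch of that slot-table pattern (illustrative names; the driver performs the lookup under host_lock):

#include <stdint.h>
#include <stddef.h>

#define CTX_SLOTS 32

static void *ctx_slot[CTX_SLOTS];

/* Park a pointer and return its slot index as the 32-bit context,
 * or UINT32_MAX when every slot is already in use. */
static uint32_t ptr_to_ctx(void *reply)
{
	for (uint32_t i = 0; i < CTX_SLOTS; i++) {
		if (ctx_slot[i] == NULL) {
			ctx_slot[i] = reply;
			return i;
		}
	}
	return UINT32_MAX;
}

/* Recover the pointer and release the slot for reuse. */
static void *ctx_to_ptr(uint32_t ctx)
{
	void *reply = ctx_slot[ctx];

	ctx_slot[ctx] = NULL;
	return reply;
}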
-
-/*===========================================================================
- * Error Handling routines
- *===========================================================================
- */
-
-static int adpt_abort(struct scsi_cmnd * cmd)
-{
- adpt_hba* pHba = NULL; /* host bus adapter structure */
- struct adpt_device* dptdevice; /* dpt per device information */
- u32 msg[5];
- int rcode;
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
- if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
- printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
- return FAILED;
- }
-
- memset(msg, 0, sizeof(msg));
- msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
- msg[2] = 0;
- msg[3]= 0;
- /* Add 1 to avoid firmware treating it as an invalid command */
- msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
- return SUCCESS;
-}
-
-
-#define I2O_DEVICE_RESET 0x27
-// This is the same for BLK and SCSI devices
-// NOTE this is wrong in the i2o.h definitions
-// This is not currently supported by our adapter but we issue it anyway
-static int adpt_device_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
- int old_state;
- struct adpt_device* d = cmd->device->hostdata;
-
- pHba = (void*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
- if (!d) {
- printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
- return FAILED;
- }
- memset(msg, 0, sizeof(msg));
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
- msg[2] = 0;
- msg[3] = 0;
-
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- old_state = d->state;
- d->state |= DPTI_DEV_RESET;
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- d->state = old_state;
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
- return SUCCESS;
- }
-}
-
-
-#define I2O_HBA_BUS_RESET 0x87
-// This version of bus reset is called by the eh_error handler
-static int adpt_bus_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- memset(msg, 0, sizeof(msg));
- printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
- msg[2] = 0;
- msg[3] = 0;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
- return SUCCESS;
- }
-}
-
-// This version of reset is called by the eh_error_handler
-static int __adpt_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- int rcode;
- char name[32];
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- strncpy(name, pHba->name, sizeof(name));
- printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
- rcode = adpt_hba_reset(pHba);
- if(rcode == 0){
- printk(KERN_WARNING"%s: HBA reset complete\n", name);
- return SUCCESS;
- } else {
- printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
- return FAILED;
- }
-}
-
-static int adpt_reset(struct scsi_cmnd* cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __adpt_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
-static int adpt_hba_reset(adpt_hba* pHba)
-{
- int rcode;
-
- pHba->state |= DPTI_STATE_RESET;
-
- // Activate does get status, init outbound, and get hrt
- if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
- printk(KERN_ERR "%s: Could not activate\n", pHba->name);
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_build_sys_table()) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in HOLD state\n",pHba->name);
-
- if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
-
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- pHba->state &= ~DPTI_STATE_RESET;
-
- scsi_host_complete_all_commands(pHba->host, DID_RESET);
- return 0; /* return success */
-}
-
-/*===========================================================================
- *
- *===========================================================================
- */
-
-
-static void adpt_i2o_sys_shutdown(void)
-{
- adpt_hba *pHba, *pNext;
- struct adpt_i2o_post_wait_data *p1, *old;
-
- printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
- printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
- /* Delete all IOPs from the controller chain */
- /* They should have already been released by the
- * scsi-core
- */
- for (pHba = hba_chain; pHba; pHba = pNext) {
- pNext = pHba->next;
- adpt_i2o_delete_hba(pHba);
- }
-
-	/* Remove any timed-out entries from the wait queue. */
-// spin_lock_irqsave(&adpt_post_wait_lock, flags);
- /* Nothing should be outstanding at this point so just
- * free them
- */
- for(p1 = adpt_post_wait_queue; p1;) {
- old = p1;
- p1 = p1->next;
- kfree(old);
- }
-// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
- adpt_post_wait_queue = NULL;
-
- printk(KERN_INFO "Adaptec I2O controllers down.\n");
-}
-
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
-{
-
- adpt_hba* pHba = NULL;
- adpt_hba* p = NULL;
- ulong base_addr0_phys = 0;
- ulong base_addr1_phys = 0;
- u32 hba_map0_area_size = 0;
- u32 hba_map1_area_size = 0;
- void __iomem *base_addr_virt = NULL;
- void __iomem *msg_addr_virt = NULL;
- int dma64 = 0;
-
- int raptorFlag = FALSE;
-
- if(pci_enable_device(pDev)) {
- return -EINVAL;
- }
-
- if (pci_request_regions(pDev, "dpt_i2o")) {
- PERROR("dpti: adpt_config_hba: pci request region failed\n");
- return -EINVAL;
- }
-
- pci_set_master(pDev);
-
- /*
- * See if we should enable dma64 mode.
- */
- if (sizeof(dma_addr_t) > 4 &&
- dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
- dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
- dma64 = 1;
-
- if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
- return -EINVAL;
-
- /* adapter only supports message blocks below 4GB */
- dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
-
- base_addr0_phys = pci_resource_start(pDev,0);
- hba_map0_area_size = pci_resource_len(pDev,0);
-
- // Check if standard PCI card or single BAR Raptor
- if(pDev->device == PCI_DPT_DEVICE_ID){
- if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
- // Raptor card with this device id needs 4M
- hba_map0_area_size = 0x400000;
- } else { // Not Raptor - it is a PCI card
- if(hba_map0_area_size > 0x100000 ){
- hba_map0_area_size = 0x100000;
- }
- }
- } else {// Raptor split BAR config
- // Use BAR1 in this configuration
- base_addr1_phys = pci_resource_start(pDev,1);
- hba_map1_area_size = pci_resource_len(pDev,1);
- raptorFlag = TRUE;
- }
-
-#if BITS_PER_LONG == 64
- /*
- * The original Adaptec 64 bit driver has this comment here:
- * "x86_64 machines need more optimal mappings"
- *
- * I assume some HBAs report ridiculously large mappings
- * and we need to limit them on platforms with IOMMUs.
- */
- if (raptorFlag == TRUE) {
- if (hba_map0_area_size > 128)
- hba_map0_area_size = 128;
- if (hba_map1_area_size > 524288)
- hba_map1_area_size = 524288;
- } else {
- if (hba_map0_area_size > 524288)
- hba_map0_area_size = 524288;
- }
-#endif
-
- base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
- if (!base_addr_virt) {
- pci_release_regions(pDev);
- PERROR("dpti: adpt_config_hba: io remap failed\n");
- return -EINVAL;
- }
-
- if(raptorFlag == TRUE) {
- msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
- if (!msg_addr_virt) {
- PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -EINVAL;
- }
- } else {
- msg_addr_virt = base_addr_virt;
- }
-
- // Allocate and zero the data structure
- pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
- if (!pHba) {
- if (msg_addr_virt != base_addr_virt)
- iounmap(msg_addr_virt);
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -ENOMEM;
- }
-
- mutex_lock(&adpt_configuration_lock);
-
- if(hba_chain != NULL){
- for(p = hba_chain; p->next; p = p->next);
- p->next = pHba;
- } else {
- hba_chain = pHba;
- }
- pHba->next = NULL;
- pHba->unit = hba_count;
- sprintf(pHba->name, "dpti%d", hba_count);
- hba_count++;
-
- mutex_unlock(&adpt_configuration_lock);
-
- pHba->pDev = pDev;
- pHba->base_addr_phys = base_addr0_phys;
-
- // Set up the Virtual Base Address of the I2O Device
- pHba->base_addr_virt = base_addr_virt;
- pHba->msg_addr_virt = msg_addr_virt;
- pHba->irq_mask = base_addr_virt+0x30;
- pHba->post_port = base_addr_virt+0x40;
- pHba->reply_port = base_addr_virt+0x44;
-
- pHba->hrt = NULL;
- pHba->lct = NULL;
- pHba->lct_size = 0;
- pHba->status_block = NULL;
- pHba->post_count = 0;
- pHba->state = DPTI_STATE_RESET;
- pHba->pDev = pDev;
- pHba->devices = NULL;
- pHba->dma64 = dma64;
-
- // Initializing the spinlocks
- spin_lock_init(&pHba->state_lock);
-
- if(raptorFlag == 0){
- printk(KERN_INFO "Adaptec I2O RAID controller"
- " %d at %p size=%x irq=%d%s\n",
- hba_count-1, base_addr_virt,
- hba_map0_area_size, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- } else {
- printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
- hba_count-1, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
- printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
- }
-
- if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
- printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
- adpt_i2o_delete_hba(pHba);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static void adpt_i2o_delete_hba(adpt_hba* pHba)
-{
- adpt_hba* p1;
- adpt_hba* p2;
- struct i2o_device* d;
- struct i2o_device* next;
- int i;
- int j;
- struct adpt_device* pDev;
- struct adpt_device* pNext;
-
-
- mutex_lock(&adpt_configuration_lock);
- if(pHba->host){
- free_irq(pHba->host->irq, pHba);
- }
- p2 = NULL;
- for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
- if(p1 == pHba) {
- if(p2) {
- p2->next = p1->next;
- } else {
- hba_chain = p1->next;
- }
- break;
- }
- }
-
- hba_count--;
- mutex_unlock(&adpt_configuration_lock);
-
- iounmap(pHba->base_addr_virt);
- pci_release_regions(pHba->pDev);
- if(pHba->msg_addr_virt != pHba->base_addr_virt){
- iounmap(pHba->msg_addr_virt);
- }
- if(pHba->FwDebugBuffer_P)
- iounmap(pHba->FwDebugBuffer_P);
- if(pHba->hrt) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
- pHba->hrt, pHba->hrt_pa);
- }
- if(pHba->lct) {
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- }
- if(pHba->status_block) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
- pHba->status_block, pHba->status_block_pa);
- }
- if(pHba->reply_pool) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- for(d = pHba->devices; d ; d = next){
- next = d->next;
- kfree(d);
- }
- for(i = 0 ; i < pHba->top_scsi_channel ; i++){
- for(j = 0; j < MAX_ID; j++){
- if(pHba->channel[i].device[j] != NULL){
- for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
- pNext = pDev->next_lun;
- kfree(pDev);
- }
- }
- }
- }
- pci_dev_put(pHba->pDev);
- if (adpt_sysfs_class)
- device_destroy(adpt_sysfs_class,
- MKDEV(DPTI_I2O_MAJOR, pHba->unit));
- kfree(pHba);
-
- if(hba_count <= 0){
- unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
- if (adpt_sysfs_class) {
- class_destroy(adpt_sysfs_class);
- adpt_sysfs_class = NULL;
- }
- }
-}
-
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
-{
- struct adpt_device* d;
-
- if (chan >= MAX_CHANNEL)
- return NULL;
-
- d = pHba->channel[chan].device[id];
- if(!d || d->tid == 0) {
- return NULL;
- }
-
-	/* If it is the only lun at that address then this should match */
- if(d->scsi_lun == lun){
- return d;
- }
-
- /* else we need to look through all the luns */
- for(d=d->next_lun ; d ; d = d->next_lun){
- if(d->scsi_lun == lun){
- return d;
- }
- }
- return NULL;
-}
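
adpt_find_device() above resolves (channel, id, lun) in two steps: a direct channel/id array lookup, then a walk of the per-LUN singly linked list hanging off that slot. A self-contained sketch of the same lookup shape; the struct and field names here are ours, not the driver's.

#include <stddef.h>
#include <stdint.h>

struct lun_node {
	uint64_t lun;
	struct lun_node *next_lun;
};

/* Return the node matching 'lun' on the per-LUN chain, or NULL. */
struct lun_node *find_lun(struct lun_node *head, uint64_t lun)
{
	struct lun_node *d;

	for (d = head; d; d = d->next_lun)
		if (d->lun == lun)
			return d;
	return NULL;
}
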
-
-
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
-{
- // I used my own version of the WAIT_QUEUE_HEAD
- // to handle some version differences
- // When embedded in the kernel this could go back to the vanilla one
- ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
- int status = 0;
- ulong flags = 0;
- struct adpt_i2o_post_wait_data *p1, *p2;
- struct adpt_i2o_post_wait_data *wait_data =
- kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
- DECLARE_WAITQUEUE(wait, current);
-
- if (!wait_data)
- return -ENOMEM;
-
- /*
- * The spin locking is needed to keep anyone from playing
- * with the queue pointers and id while we do the same
- */
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- // TODO we need a MORE unique way of getting ids
- // to support async LCT get
- wait_data->next = adpt_post_wait_queue;
- adpt_post_wait_queue = wait_data;
- adpt_post_wait_id++;
- adpt_post_wait_id &= 0x7fff;
- wait_data->id = adpt_post_wait_id;
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- wait_data->wq = &adpt_wq_i2o_post;
- wait_data->status = -ETIMEDOUT;
-
- add_wait_queue(&adpt_wq_i2o_post, &wait);
-
- msg[2] |= 0x80000000 | ((u32)wait_data->id);
- timeout *= HZ;
- if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
- set_current_state(TASK_INTERRUPTIBLE);
- if(pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (!timeout)
- schedule();
- else{
- timeout = schedule_timeout(timeout);
- if (timeout == 0) {
- // I/O issued, but cannot get result in
-				// specified time. Freeing resources is
- // dangerous.
- status = -ETIME;
- }
- }
- if(pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- }
- remove_wait_queue(&adpt_wq_i2o_post, &wait);
-
- if(status == -ETIMEDOUT){
- printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
- // We will have to free the wait_data memory during shutdown
- return status;
- }
-
- /* Remove the entry from the queue. */
- p2 = NULL;
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
- if(p1 == wait_data) {
- if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
- status = -EOPNOTSUPP;
- }
- if(p2) {
- p2->next = p1->next;
- } else {
- adpt_post_wait_queue = p1->next;
- }
- break;
- }
- }
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- kfree(wait_data);
-
- return status;
-}
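
The post-wait machinery correlates replies with sleepers through the transaction context: a 15-bit id that wraps at 0x7fff, with bit 31 set to mark the frame as post-wait. A sketch of that encode/decode using the same masks as the code above (the locking that adpt_post_wait_lock provides is elided):

#include <stdint.h>

#define POST_WAIT_FLAG 0x80000000u
#define POST_WAIT_MASK 0x7fffu

static uint32_t next_post_wait_id; /* guarded by adpt_post_wait_lock in the driver */

/* Allocate the next 15-bit post-wait id and build the context word
 * that goes into msg[2]. */
uint32_t post_wait_context(void)
{
	next_post_wait_id = (next_post_wait_id + 1) & POST_WAIT_MASK;
	return POST_WAIT_FLAG | next_post_wait_id;
}

/* Recover the id from a reply's transaction context, as the
 * completion handler does. */
uint32_t post_wait_id(uint32_t context)
{
	return context & POST_WAIT_MASK;
}
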
-
-
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
-{
-
- u32 m = EMPTY_QUEUE;
- u32 __iomem *msg;
- ulong timeout = jiffies + 30*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg = pHba->msg_addr_virt + m;
- memcpy_toio(msg, data, len);
- wmb();
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- return 0;
-}
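
adpt_i2o_post_this() shows the driver's two-transaction inbound protocol: read the post port to claim a free message frame address (EMPTY_QUEUE means none is free), copy the frame into that offset of the message window, then write the same MFA back to the post port to hand it to the IOP. The timeout checks in these loops rely on wraparound-safe jiffies comparison; a standalone sketch of that idiom, with the function name ours:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "now is after deadline", the same idea as the
 * kernel's time_after(); valid while the distance stays below 2^31. */
static bool ticks_after(uint32_t now, uint32_t deadline)
{
	return (int32_t)(now - deadline) > 0;
}

int main(void)
{
	uint32_t deadline = 0xfffffff0u + 0x20; /* deadline lands past the wrap */

	printf("%d\n", ticks_after(0xfffffff8u, deadline)); /* 0: not yet */
	printf("%d\n", ticks_after(0x00000011u, deadline)); /* 1: expired */
	return 0;
}
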
-
-
-static void adpt_i2o_post_wait_complete(u32 context, int status)
-{
- struct adpt_i2o_post_wait_data *p1 = NULL;
- /*
- * We need to search through the adpt_post_wait
- * queue to see if the given message is still
- * outstanding. If not, it means that the IOP
-	 * had allowed and the timer has already expired.
-	 * Not much we can do about that except log
-	 * it for debugging, increase the timeout, and recompile.
- * it for debug purposes, increase timeout, and recompile
- *
- * Lock needed to keep anyone from moving queue pointers
- * around while we're looking through them.
- */
-
- context &= 0x7fff;
-
- spin_lock(&adpt_post_wait_lock);
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- if(p1->id == context) {
- p1->status = status;
- spin_unlock(&adpt_post_wait_lock);
- wake_up_interruptible(p1->wq);
- return;
- }
- }
- spin_unlock(&adpt_post_wait_lock);
- // If this happens we lose commands that probably really completed
-	printk(KERN_DEBUG"dpti: Could not find task %d in wait queue\n",context);
- printk(KERN_DEBUG" Tasks in wait queue:\n");
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- printk(KERN_DEBUG" %d\n",p1->id);
- }
- return;
-}
-
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
-{
- u32 msg[8];
- u8* status;
- dma_addr_t addr;
- u32 m = EMPTY_QUEUE ;
- ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
-
- if(pHba->initialized == FALSE) { // First time reset should be quick
- timeout = jiffies + (25*HZ);
- } else {
- adpt_i2o_quiesce_hba(pHba);
- }
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"Timeout waiting for message!\n");
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if(status == NULL) {
- adpt_send_nop(pHba, m);
- printk(KERN_ERR"IOP reset failed - no free memory.\n");
- return -ENOMEM;
- }
-
- msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]=0;
- msg[3]=0;
- msg[4]=0;
- msg[5]=0;
- msg[6]=dma_low(addr);
- msg[7]=dma_high(addr);
-
- memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
- wmb();
- writel(m, pHba->post_port);
- wmb();
-
- while(*status == 0){
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we cannot
- free these because controller may awake and corrupt
- those bytes at any time */
-			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
- PDEBUG("%s: Reset in progress...\n", pHba->name);
-		// Here we wait for a message frame to become available,
-		// indicating that the reset has finished
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we
- cannot free these because controller may
- awake and corrupt those bytes at any time */
-				/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
- // Flush the offset
- adpt_send_nop(pHba, m);
- }
- adpt_i2o_status_get(pHba);
- if(*status == 0x02 ||
- pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
- pHba->name);
- } else {
- PDEBUG("%s: Reset completed.\n", pHba->name);
- }
-
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-#ifdef UARTDELAY
- // This delay is to allow someone attached to the card through the debug UART to
- // set up the dump levels that they want before the rest of the initialization sequence
- adpt_delay(20000);
-#endif
- return 0;
-}
-
-
-static int adpt_i2o_parse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
-	u32 buf[10]; // needs to hold at least 8 u32s of query data
- struct adpt_device* pDev;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- /*
- * If we have hidden devices, we need to inform the upper layers about
- * the possible maximum id reference to handle device access when
- * an array is disassembled. This code has no other purpose but to
- * allow us future access to devices that are currently hidden
- * behind arrays, hotspares or have not been configured (JBOD mode).
- */
- if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
- lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
- lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- continue;
- }
- tid = lct->lct_entry[i].tid;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- continue;
- }
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
-			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
-				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
- continue;
- }
- if (scsi_id >= MAX_ID){
-				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
- continue;
- }
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- }
- d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
- if(d==NULL)
- {
- printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- tid = d->lct_data.tid;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
- }
- bus_no = 0;
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
- tid = d->lct_data.tid;
-			// TODO get the bus_no from the HRT - but for now they are in order
- //bus_no =
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- pHba->channel[bus_no].type = d->lct_data.class_id;
- pHba->channel[bus_no].tid = tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
- {
- pHba->channel[bus_no].scsi_id = buf[1];
- PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
- }
- // TODO remove - this is just until we get from hrt
- bus_no++;
-			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
- printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
- break;
- }
- }
- }
-
- // Setup adpt_device table
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
-
- tid = d->lct_data.tid;
- scsi_id = -1;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
-				if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
- continue;
- }
- if (scsi_id >= MAX_ID) {
- continue;
- }
- if( pHba->channel[bus_no].device[scsi_id] == NULL){
- pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- for( pDev = pHba->channel[bus_no].device[scsi_id];
- pDev->next_lun; pDev = pDev->next_lun){
- }
- pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev->next_lun == NULL) {
- return -ENOMEM;
- }
- pDev = pDev->next_lun;
- }
- pDev->tid = tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- }
- if(scsi_id == -1){
- printk(KERN_WARNING"Could not find SCSI ID for %s\n",
- d->lct_data.identity_tag);
- }
- }
- }
- return 0;
-}
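
Two pieces of arithmetic in adpt_i2o_parse_lct() deserve spelling out: the LCT starts with a 3-word header followed by 9-word entries, so the entry count is (table_size - 3) / 9, and the DPT device-info scalar packs type, flags and bus number into buf[0] with the SCSI id in buf[1]. A sketch of both decodes; the struct name and field names are ours.

#include <stdint.h>

/* The LCT: a 3-word header followed by 9-word entries. */
int lct_entry_count(uint32_t table_size_in_words)
{
	return (int)((table_size_in_words - 3) / 9);
}

/* Layout of the DPT device-info scalar as consumed above. */
struct dpt_dev_info {
	uint8_t  type;     /* buf[0] bits 0..7   */
	uint8_t  flags;    /* buf[0] bits 8..15  */
	uint16_t bus_no;   /* buf[0] bits 16..31 */
	uint32_t scsi_id;  /* buf[1]             */
};

struct dpt_dev_info decode_dev_info(const uint32_t buf[2])
{
	struct dpt_dev_info info = {
		.type    = buf[0] & 0xff,
		.flags   = (buf[0] >> 8) & 0xff,
		.bus_no  = buf[0] >> 16,
		.scsi_id = buf[1],
	};
	return info;
}
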
-
-
-/*
- * Each I2O controller has a chain of devices on it - these match
- * the useful parts of the LCT of the board.
- */
-
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
-{
- mutex_lock(&adpt_configuration_lock);
- d->controller=pHba;
- d->owner=NULL;
- d->next=pHba->devices;
- d->prev=NULL;
- if (pHba->devices != NULL){
- pHba->devices->prev=d;
- }
- pHba->devices=d;
- *d->dev_name = 0;
-
- mutex_unlock(&adpt_configuration_lock);
- return 0;
-}
-
-static int adpt_open(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- mutex_lock(&adpt_mutex);
- //TODO check for root access
- //
- minor = iminor(inode);
- if (minor >= hba_count) {
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- if (pHba == NULL) {
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
-
-// if(pHba->in_use){
- // mutex_unlock(&adpt_configuration_lock);
-// return -EBUSY;
-// }
-
- pHba->in_use = 1;
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
-
- return 0;
-}
-
-static int adpt_close(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- minor = iminor(inode);
- if (minor >= hba_count) {
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return -ENXIO;
- }
-
- pHba->in_use = 0;
-
- return 0;
-}
-
-
-static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
-{
- u32 msg[MAX_MESSAGE_SIZE];
- u32* reply = NULL;
- u32 size = 0;
- u32 reply_size = 0;
- u32 __user *user_msg = arg;
- u32 __user * user_reply = NULL;
- void **sg_list = NULL;
- u32 sg_offset = 0;
- u32 sg_count = 0;
- int sg_index = 0;
- u32 i = 0;
- u32 rcode = 0;
- void *p = NULL;
- dma_addr_t addr;
- ulong flags = 0;
-
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- return -EFAULT;
- }
- size = size>>16;
-
-	if(size > MAX_MESSAGE_SIZE){
-		return -EFAULT;
-	}
-	user_reply = &user_msg[size];
- size *= 4; // Convert to bytes
-
- /* Copy in the user's I2O command */
- if(copy_from_user(msg, user_msg, size)) {
- return -EFAULT;
- }
-	if(get_user(reply_size, &user_reply[0])){
-		return -EFAULT;
-	}
- reply_size = reply_size>>16;
- if(reply_size > REPLY_FRAME_SIZE){
- reply_size = REPLY_FRAME_SIZE;
- }
- reply_size *= 4;
- reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
- if(reply == NULL) {
- printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
- return -ENOMEM;
- }
- sg_offset = (msg[0]>>4)&0xf;
- msg[2] = 0x40000000; // IOCTL context
- msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1) {
- rcode = -EBUSY;
- goto free;
- }
-
- sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
- if (!sg_list) {
- rcode = -ENOMEM;
- goto free;
- }
- if(sg_offset) {
- // TODO add 64 bit API
- struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
- if (sg_count > pHba->sg_tablesize){
- printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
- rcode = -EINVAL;
- goto free;
- }
-
- for(i = 0; i < sg_count; i++) {
- int sg_size;
-
- if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
- printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
- rcode = -EINVAL;
- goto cleanup;
- }
- sg_size = sg[i].flag_count & 0xffffff;
- /* Allocate memory for the transfer */
- p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
- if(!p) {
- printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- pHba->name,sg_size,i,sg_count);
- rcode = -ENOMEM;
- goto cleanup;
- }
- sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
- /* Copy in the user's SG buffer if necessary */
- if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
- // sg_simple_element API is 32 bit
- if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
- printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- /* sg_simple_element API is 32 bit, but addr < 4GB */
- sg[i].addr_bus = addr;
- }
- }
-
- do {
- /*
-		 * Stop any new commands from entering the
- * controller while processing the ioctl
- */
- if (pHba->host) {
- scsi_block_requests(pHba->host);
- spin_lock_irqsave(pHba->host->host_lock, flags);
- }
- rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
- if (rcode != 0)
-			printk(KERN_WARNING"adpt_i2o_passthru: post wait failed %d %p\n",
- rcode, reply);
- if (pHba->host) {
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- scsi_unblock_requests(pHba->host);
- }
- } while (rcode == -ETIMEDOUT);
-
- if(rcode){
- goto cleanup;
- }
-
- if(sg_offset) {
- /* Copy back the Scatter Gather buffers back to user space */
- u32 j;
- // TODO add 64 bit API
- struct sg_simple_element* sg;
- int sg_size;
-
-		// re-read the original message to correctly handle the SG copy operation
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- rcode = -EFAULT;
- goto cleanup;
- }
- size = size>>16;
- size *= 4;
- if (size > MAX_MESSAGE_SIZE) {
- rcode = -EINVAL;
- goto cleanup;
- }
- /* Copy in the user's I2O command */
- if (copy_from_user (msg, user_msg, size)) {
- rcode = -EFAULT;
- goto cleanup;
- }
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
-
- // TODO add 64 bit API
- sg = (struct sg_simple_element*)(msg + sg_offset);
- for (j = 0; j < sg_count; j++) {
- /* Copy out the SG list to user's buffer if necessary */
- if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
- sg_size = sg[j].flag_count & 0xffffff;
- // sg_simple_element API is 32 bit
- if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- }
- }
-
- /* Copy back the reply to user space */
- if (reply_size) {
- // we wrote our own values for context - now restore the user supplied ones
- if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
- printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
- rcode = -EFAULT;
- }
- if(copy_to_user(user_reply, reply, reply_size)) {
- printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
- rcode = -EFAULT;
- }
- }
-
-
-cleanup:
- if (rcode != -ETIME && rcode != -EINTR) {
- struct sg_simple_element *sg =
- (struct sg_simple_element*) (msg +sg_offset);
- while(sg_index) {
- if(sg_list[--sg_index]) {
- dma_free_coherent(&pHba->pDev->dev,
- sg[sg_index].flag_count & 0xffffff,
- sg_list[sg_index],
- sg[sg_index].addr_bus);
- }
- }
- }
-
-free:
- kfree(sg_list);
- kfree(reply);
- return rcode;
-}
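
The passthru path treats each sg_simple_element's flag_count as a packed word: bit 28 marks a simple address element, bit 26 carries the data direction, and the low 24 bits hold the byte count. A sketch of those decodes under that assumption; the macro and function names are ours.

#include <stdbool.h>
#include <stdint.h>

#define SG_SIMPLE 0x10000000u /* simple address element */
#define SG_DIR    0x04000000u /* data flows toward the device */

struct sg_elem { uint32_t flag_count; uint32_t addr_bus; };

bool sg_is_simple(const struct sg_elem *e) { return (e->flag_count & SG_SIMPLE) != 0; }
bool sg_to_device(const struct sg_elem *e) { return (e->flag_count & SG_DIR) != 0; }
uint32_t sg_bytes(const struct sg_elem *e)  { return e->flag_count & 0x00ffffffu; }
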
-
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- si->processorType = PROC_IA64;
-}
-#endif
-
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- si->processorType = PROC_ULTRASPARC;
-}
-#endif
-#if defined __alpha__
-static void adpt_alpha_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- si->processorType = PROC_ALPHA;
-}
-#endif
-
-#if defined __i386__
-
-#include <uapi/asm/vm86.h>
-
-static void adpt_i386_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
-	// management utility requires it
- switch (boot_cpu_data.x86) {
- case CPU_386:
- si->processorType = PROC_386;
- break;
- case CPU_486:
- si->processorType = PROC_486;
- break;
- case CPU_586:
- si->processorType = PROC_PENTIUM;
- break;
- default: // Just in case
- si->processorType = PROC_PENTIUM;
- break;
- }
-}
-#endif
-
-/*
- * This routine returns information about the system. It does not affect
- * any logic, and if the info is wrong it doesn't matter.
- */
-
-/* Get all the info we cannot get from kernel services */
-static int adpt_system_info(void __user *buffer)
-{
- sysInfo_S si;
-
- memset(&si, 0, sizeof(si));
-
- si.osType = OS_LINUX;
- si.osMajorVersion = 0;
- si.osMinorVersion = 0;
- si.osRevision = 0;
- si.busType = SI_PCI_BUS;
- si.processorFamily = DPTI_sig.dsProcessorFamily;
-
-#if defined __i386__
- adpt_i386_info(&si);
-#elif defined (__ia64__)
- adpt_ia64_info(&si);
-#elif defined(__sparc__)
- adpt_sparc_info(&si);
-#elif defined (__alpha__)
- adpt_alpha_info(&si);
-#else
- si.processorType = 0xff ;
-#endif
- if (copy_to_user(buffer, &si, sizeof(si))){
- printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
-{
- int minor;
- int error = 0;
- adpt_hba* pHba;
- ulong flags = 0;
- void __user *argp = (void __user *)arg;
-
- minor = iminor(inode);
- if (minor >= DPTI_MAX_HBA){
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if(pHba == NULL){
- return -ENXIO;
- }
-
- while((volatile u32) pHba->state & DPTI_STATE_RESET )
- schedule_timeout_uninterruptible(2);
-
- switch (cmd) {
- // TODO: handle 3 cases
- case DPT_SIGNATURE:
- if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
- return -EFAULT;
- }
- break;
- case I2OUSRCMD:
- return adpt_i2o_passthru(pHba, argp);
-
- case DPT_CTRLINFO:{
- drvrHBAinfo_S HbaInfo;
-
-#define FLG_OSD_PCI_VALID 0x0001
-#define FLG_OSD_DMA 0x0002
-#define FLG_OSD_I2O 0x0004
- memset(&HbaInfo, 0, sizeof(HbaInfo));
- HbaInfo.drvrHBAnum = pHba->unit;
- HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
- HbaInfo.blinkState = adpt_read_blink_led(pHba);
- HbaInfo.pciBusNum = pHba->pDev->bus->number;
- HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
- HbaInfo.Interrupt = pHba->pDev->irq;
- HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
- if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
- printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
- return -EFAULT;
- }
- break;
- }
- case DPT_SYSINFO:
- return adpt_system_info(argp);
- case DPT_BLINKLED:{
- u32 value;
- value = (u32)adpt_read_blink_led(pHba);
- if (copy_to_user(argp, &value, sizeof(value))) {
- return -EFAULT;
- }
- break;
- }
- case I2ORESETCMD: {
- struct Scsi_Host *shost = pHba->host;
-
- if (shost)
- spin_lock_irqsave(shost->host_lock, flags);
- adpt_hba_reset(pHba);
- if (shost)
- spin_unlock_irqrestore(shost->host_lock, flags);
- break;
- }
- case I2ORESCANCMD:
- adpt_rescan(pHba);
- break;
- default:
- return -EINVAL;
- }
-
- return error;
-}
-
-static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
- ret = adpt_ioctl(inode, file, cmd, arg);
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
-
- switch(cmd) {
- case DPT_SIGNATURE:
- case I2OUSRCMD:
- case DPT_CTRLINFO:
- case DPT_SYSINFO:
- case DPT_BLINKLED:
- case I2ORESETCMD:
- case I2ORESCANCMD:
- case (DPT_TARGET_BUSY & 0xFFFF):
- case DPT_TARGET_BUSY:
- ret = adpt_ioctl(inode, file, cmd, arg);
- break;
- default:
- ret = -ENOIOCTLCMD;
- }
-
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-#endif
-
-static irqreturn_t adpt_isr(int irq, void *dev_id)
-{
- struct scsi_cmnd* cmd;
- adpt_hba* pHba = dev_id;
- u32 m;
- void __iomem *reply;
- u32 status=0;
- u32 context;
- ulong flags = 0;
- int handled = 0;
-
- if (pHba == NULL){
- printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
- return IRQ_NONE;
- }
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
-
- while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // Try twice then give up
- rmb();
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // This really should not happen
- printk(KERN_ERR"dpti: Could not get reply frame\n");
- goto out;
- }
- }
- if (pHba->reply_pool_pa <= m &&
- m < pHba->reply_pool_pa +
- (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
- reply = (u8 *)pHba->reply_pool +
- (m - pHba->reply_pool_pa);
- } else {
- /* Ick, we should *never* be here */
- printk(KERN_ERR "dpti: reply frame not from pool\n");
- reply = (u8 *)bus_to_virt(m);
- }
-
- if (readl(reply) & MSG_FAIL) {
- u32 old_m = readl(reply+28);
- void __iomem *msg;
- u32 old_context;
- PDEBUG("%s: Failed message\n",pHba->name);
- if(old_m >= 0x100000){
- printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
- writel(m,pHba->reply_port);
- continue;
- }
- // Transaction context is 0 in failed reply frame
- msg = pHba->msg_addr_virt + old_m;
- old_context = readl(msg+12);
- writel(old_context, reply+12);
- adpt_send_nop(pHba, old_m);
- }
- context = readl(reply+8);
- if(context & 0x40000000){ // IOCTL
- void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
- if( p != NULL) {
- memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
- }
- // All IOCTLs will also be post wait
- }
- if(context & 0x80000000){ // Post wait message
- status = readl(reply+16);
- if(status >> 24){
- status &= 0xffff; /* Get detail status */
- } else {
- status = I2O_POST_WAIT_OK;
- }
- if(!(context & 0x40000000)) {
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL) {
- printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
- }
- }
- adpt_i2o_post_wait_complete(context, status);
- } else { // SCSI message
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL){
- scsi_dma_unmap(cmd);
- adpt_i2o_scsi_complete(reply, cmd);
- }
- }
- writel(m, pHba->reply_port);
- wmb();
- rmb();
- }
- handled = 1;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return IRQ_RETVAL(handled);
-}
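
The ISR demultiplexes replies purely on transaction-context flag bits: bit 30 marks an ioctl frame (copied back to the caller's buffer), bit 31 marks a post-wait frame, and anything else is a SCSI completion; an ioctl reply carries both bits and is completed through the post-wait path after the copy. A sketch of that classification:

#include <stdint.h>

enum reply_kind { REPLY_SCSI, REPLY_IOCTL, REPLY_POST_WAIT };

/* Classify a reply by its transaction-context flag bits, mirroring the
 * ISR: an ioctl reply carries bit 30 (and bit 31, since all ioctls are
 * also post-wait); a plain post-wait reply carries only bit 31. */
enum reply_kind classify_context(uint32_t context)
{
	if (context & 0x40000000u)
		return REPLY_IOCTL;
	if (context & 0x80000000u)
		return REPLY_POST_WAIT;
	return REPLY_SCSI;
}
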
-
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
-{
- int i;
- u32 msg[MAX_MESSAGE_SIZE];
- u32* mptr;
- u32* lptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- int nseg;
- u32 len;
- u32 reqlen;
- s32 rcode;
- dma_addr_t addr;
-
- memset(msg, 0 , sizeof(msg));
- len = scsi_bufflen(cmd);
- direction = 0x00000000;
-
- scsidir = 0x00000000; // DATA NO XFER
- if(len) {
- /*
- * Set SCBFlags to indicate if data is being transferred
- * in or out, or no data transfer
- * Note: Do not have to verify index is less than 0 since
- * cmd->cmnd[0] is an unsigned char
- */
- switch(cmd->sc_data_direction){
- case DMA_FROM_DEVICE:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- break;
- case DMA_TO_DEVICE:
- direction=0x04000000; // SGL OUT
- scsidir =0x80000000; // DATA OUT (iop-->dev)
- break;
- case DMA_NONE:
- break;
- case DMA_BIDIRECTIONAL:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- // Assume In - and continue;
- break;
- default:
- printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
- pHba->name, cmd->cmnd[0]);
- cmd->result = (DID_ERROR <<16);
- scsi_done(cmd);
- return 0;
- }
- }
- // msg[0] is set later
- // I2O_CMD_SCSI_EXEC
- msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
- msg[2] = 0;
- /* Add 1 to avoid firmware treating it as invalid command */
- msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
- // Our cards use the transaction context as the tag for queueing
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
- msg[5] = d->tid;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000|cmd->cmd_len;
-
- mptr=msg+7;
-
-	// Write the SCSI command into the message - always a 16-byte block
- memset(mptr, 0, 16);
- memcpy(mptr, cmd->cmnd, cmd->cmd_len);
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
- if (dpt_dma64(pHba)) {
- reqlen = 16; // SINGLE SGE
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- } else {
- reqlen = 14; // SINGLE SGE
- }
- /* Now fill in the SGList and command */
-
- nseg = scsi_dma_map(cmd);
- BUG_ON(nseg < 0);
- if (nseg) {
- struct scatterlist *sg;
-
- len = 0;
- scsi_for_each_sg(cmd, sg, nseg, i) {
- lptr = mptr;
- *mptr++ = direction|0x10000000|sg_dma_len(sg);
- len+=sg_dma_len(sg);
- addr = sg_dma_address(sg);
- *mptr++ = dma_low(addr);
- if (dpt_dma64(pHba))
- *mptr++ = dma_high(addr);
- /* Make this an end of list */
- if (i == nseg - 1)
- *lptr = direction|0xD0000000|sg_dma_len(sg);
- }
- reqlen = mptr - msg;
- *lenptr = len;
-
- if(cmd->underflow && len != cmd->underflow){
- printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
- len, cmd->underflow);
- }
- } else {
- *lenptr = len = 0;
- reqlen = 12;
- }
-
- /* Stick the headers on */
- msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
-
-	// Send it on its way
- rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
- if (rcode == 0) {
- return 0;
- }
- return rcode;
-}
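
adpt_scsi_to_i2o() builds the SCB flags word in msg[6] from three parts: a direction code (0x40000000 data-in, 0x80000000 data-out), the fixed 0x20a00000 flags (enable disconnect, simple queue tag, sense data in the reply message, per the comments above), and the CDB length in the low bits. A sketch of that composition; the names are ours.

#include <stdint.h>

#define SCB_DATA_IN  0x40000000u /* iop <-- device */
#define SCB_DATA_OUT 0x80000000u /* iop --> device */
/* Fixed flags the driver always sets: enable disconnect, simple queue
 * tag, sense data returned in the reply message. */
#define SCB_FIXED    0x20a00000u

uint32_t scb_flags(int data_in, int data_out, uint32_t cdb_len)
{
	uint32_t dir = data_in ? SCB_DATA_IN : (data_out ? SCB_DATA_OUT : 0);

	return dir | SCB_FIXED | cdb_len;
}
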
-
-
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
-{
- struct Scsi_Host *host;
-
- host = scsi_host_alloc(sht, sizeof(adpt_hba*));
- if (host == NULL) {
- printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
- return -1;
- }
- host->hostdata[0] = (unsigned long)pHba;
- pHba->host = host;
-
- host->irq = pHba->pDev->irq;
- /* no IO ports, so don't have to set host->io_port and
- * host->n_io_port
- */
- host->io_port = 0;
- host->n_io_port = 0;
- /* see comments in scsi_host.h */
- host->max_id = 16;
- host->max_lun = 256;
- host->max_channel = pHba->top_scsi_channel + 1;
- host->cmd_per_lun = 1;
- host->unique_id = (u32)sys_tbl_pa + pHba->unit;
- host->sg_tablesize = pHba->sg_tablesize;
- host->can_queue = pHba->post_fifo_size;
-
- return 0;
-}
-
-
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba;
- u32 hba_status;
- u32 dev_status;
- u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
- // I know this would look cleaner if I just read bytes
- // but the model I have been using for all the rest of the
-	// I/O is in 4-byte words - so I keep that model
- u16 detailed_status = readl(reply+16) &0xffff;
- dev_status = (detailed_status & 0xff);
- hba_status = detailed_status >> 8;
-
- // calculate resid for sg
- scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
-
- cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
-
- if(!(reply_flags & MSG_FAIL)) {
- switch(detailed_status & I2O_SCSI_DSC_MASK) {
- case I2O_SCSI_DSC_SUCCESS:
- cmd->result = (DID_OK << 16);
- // handle underflow
- if (readl(reply+20) < cmd->underflow) {
- cmd->result = (DID_ERROR <<16);
- printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
- }
- break;
- case I2O_SCSI_DSC_REQUEST_ABORTED:
- cmd->result = (DID_ABORT << 16);
- break;
- case I2O_SCSI_DSC_PATH_INVALID:
- case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
- case I2O_SCSI_DSC_SELECTION_TIMEOUT:
- case I2O_SCSI_DSC_COMMAND_TIMEOUT:
- case I2O_SCSI_DSC_NO_ADAPTER:
- case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
- printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_TIME_OUT << 16);
- break;
- case I2O_SCSI_DSC_ADAPTER_BUSY:
- case I2O_SCSI_DSC_BUS_BUSY:
- cmd->result = (DID_BUS_BUSY << 16);
- break;
- case I2O_SCSI_DSC_SCSI_BUS_RESET:
- case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
- cmd->result = (DID_RESET << 16);
- break;
- case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
- printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
- cmd->result = (DID_PARITY << 16);
- break;
- case I2O_SCSI_DSC_UNABLE_TO_ABORT:
- case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
- case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
- case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_AUTOSENSE_FAILED:
- case I2O_SCSI_DSC_DATA_OVERRUN:
- case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
- case I2O_SCSI_DSC_SEQUENCE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
- case I2O_SCSI_DSC_PROVIDE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_TERMINATED:
- case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
- case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
- case I2O_SCSI_DSC_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_INVALID_CDB:
- case I2O_SCSI_DSC_LUN_INVALID:
- case I2O_SCSI_DSC_SCSI_TID_INVALID:
- case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
- case I2O_SCSI_DSC_NO_NEXUS:
- case I2O_SCSI_DSC_CDB_RECEIVED:
- case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
- case I2O_SCSI_DSC_QUEUE_FROZEN:
- case I2O_SCSI_DSC_REQUEST_INVALID:
- default:
- printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_ERROR << 16);
- break;
- }
-
- // copy over the request sense data if it was a check
- // condition status
- if (dev_status == SAM_STAT_CHECK_CONDITION) {
- u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
- // Copy over the sense data
- memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
- if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
- cmd->sense_buffer[2] == DATA_PROTECT ){
-				/* This handles a failed array */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
-
- }
- }
- } else {
-		/* In this condition we could not talk to the tid;
-		 * the card rejected it. We should signal a retry
-		 * for a limited number of retries.
- */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
- }
-
- cmd->result |= (dev_status);
-
- scsi_done(cmd);
-}
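
The completion path splits the 16-bit detailed status into a SAM device status (low byte) and an HBA status (next byte), then builds cmd->result with the host byte in bits 16..23 and the device status in the low byte. A sketch of both, assuming only those layouts:

#include <stdint.h>

/* Split the reply's detailed status: SAM device status in the low byte,
 * adapter (HBA) status in the next byte. */
void split_detailed_status(uint16_t detailed, uint8_t *dev, uint8_t *hba)
{
	*dev = detailed & 0xff;
	*hba = detailed >> 8;
}

/* Compose a Linux SCSI result word: host byte in bits 16..23, SAM
 * status in the low byte (e.g. the DID_* << 16 | dev_status above). */
uint32_t scsi_result_word(uint8_t host_byte, uint8_t sam_status)
{
	return ((uint32_t)host_byte << 16) | sam_status;
}
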
-
-
-static s32 adpt_rescan(adpt_hba* pHba)
-{
- s32 rcode;
- ulong flags = 0;
-
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
- goto out;
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
- goto out;
- rcode = 0;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return rcode;
-}
-
-
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // at least 8 u32's
- struct adpt_device* pDev = NULL;
- struct i2o_device* pI2o_dev = NULL;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- // Mark each drive as unscanned
- for (d = pHba->devices; d; d = d->next) {
- pDev =(struct adpt_device*) d->owner;
- if(!pDev){
- continue;
- }
- pDev->state |= DPTI_DEV_UNSCANNED;
- }
-
- printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- continue;
- }
-
- if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- tid = lct->lct_entry[i].tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- printk(KERN_ERR"%s: Could not query device\n",pHba->name);
- continue;
- }
- bus_no = buf[0]>>16;
- if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
- printk(KERN_WARNING
- "%s: Channel number %d out of range\n",
- pHba->name, bus_no);
- continue;
- }
-
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- pDev = pHba->channel[bus_no].device[scsi_id];
-			/* walk the per-LUN chain */
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- break;
- }
- pDev = pDev->next_lun;
- }
- if(!pDev ) { // Something new add it
- d = kmalloc(sizeof(struct i2o_device),
- GFP_ATOMIC);
- if(d==NULL)
- {
- printk(KERN_CRIT "Out of memory for I2O device data.\n");
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
-
- pDev = pHba->channel[bus_no].device[scsi_id];
- if( pDev == NULL){
- pDev =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- while (pDev->next_lun) {
- pDev = pDev->next_lun;
- }
- pDev = pDev->next_lun =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- }
- pDev->tid = d->lct_data.tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
-					// Too late, the SCSI system has made up its mind, but what the hey ...
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- } // end of new i2o device
-
- // We found an old device - check it
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- if(!scsi_device_online(pDev->pScsi_dev)) {
- printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
- pHba->name,bus_no,scsi_id,scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
- }
- }
- d = pDev->pI2o_dev;
- if(d->lct_data.tid != tid) { // something changed
- pDev->tid = tid;
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
- if (pDev->pScsi_dev) {
- pDev->pScsi_dev->changed = TRUE;
- pDev->pScsi_dev->removable = TRUE;
- }
- }
- // Found it - mark it scanned
- pDev->state = DPTI_DEV_ONLINE;
- break;
- }
- pDev = pDev->next_lun;
- }
- }
- }
- for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
- pDev =(struct adpt_device*) pI2o_dev->owner;
- if(!pDev){
- continue;
- }
- // Drive offline drives that previously existed but could not be found
- // in the LCT table
- if (pDev->state & DPTI_DEV_UNSCANNED){
- pDev->state = DPTI_DEV_OFFLINE;
- printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
- }
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Routines from i2o subsystem
- *============================================================================
- */
-
-
-
-/*
- * Bring an I2O controller into HOLD state. See the spec.
- */
-static int adpt_i2o_activate_hba(adpt_hba* pHba)
-{
- int rcode;
-
- if(pHba->initialized ) {
- if (adpt_i2o_status_get(pHba) < 0) {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
- if (adpt_i2o_status_get(pHba) < 0) {
- printk(KERN_INFO "HBA not responding.\n");
- return -1;
- }
- }
-
- if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
- printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
- return -1;
- }
-
- if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
- pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
- pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
- pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
- adpt_i2o_reset_hba(pHba);
- if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
- return -1;
- }
- }
- } else {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
-
- }
-
- if (adpt_i2o_init_outbound_q(pHba) < 0) {
- return -1;
- }
-
- /* In HOLD state */
-
- if (adpt_i2o_hrt_get(pHba) < 0) {
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Bring a controller online into OPERATIONAL state.
- */
-
-static int adpt_i2o_online_hba(adpt_hba* pHba)
-{
- if (adpt_i2o_systab_send(pHba) < 0)
- return -1;
- /* In READY state */
-
- if (adpt_i2o_enable_hba(pHba) < 0)
- return -1;
-
- /* In OPERATIONAL state */
- return 0;
-}
-
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
-{
- u32 __iomem *msg;
- ulong timeout = jiffies + 5*HZ;
-
- while(m == EMPTY_QUEUE){
- rmb();
- m = readl(pHba->post_port);
- if(m != EMPTY_QUEUE){
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
- return 2;
- }
- schedule_timeout_uninterruptible(1);
- }
- msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
- writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
- writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
- writel( 0,&msg[2]);
- wmb();
-
- writel(m, pHba->post_port);
- wmb();
- return 0;
-}
-
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
-{
- u8 *status;
- dma_addr_t addr;
- u32 __iomem *msg = NULL;
- int i;
- ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
- u32 m;
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
-
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if (!status) {
- adpt_send_nop(pHba, m);
- printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
- pHba->name);
- return -ENOMEM;
- }
-
- writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
- writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
- writel(0, &msg[2]);
- writel(0x0106, &msg[3]); /* Transaction context */
- writel(4096, &msg[4]); /* Host page frame size */
- writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
- writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
- writel((u32)addr, &msg[7]);
-
- writel(m, pHba->post_port);
- wmb();
-
- // Wait for the reply status to come back
- do {
- if (*status) {
- if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
- break;
- }
- }
- rmb();
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we
- cannot free these because controller may
- awake and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (1);
-
- // If the command was successful, fill the fifo with our reply
- // message packets
- if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
- return -2;
- }
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-
- if(pHba->reply_pool != NULL) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- &pHba->reply_pool_pa, GFP_KERNEL);
- if (!pHba->reply_pool) {
- printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
- return -ENOMEM;
- }
-
- for(i = 0; i < pHba->reply_fifo_size; i++) {
- writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
- pHba->reply_port);
- wmb();
- }
- adpt_i2o_status_get(pHba);
- return 0;
-}
-
-
-/*
- * I2O System Table. Contains information about
- * all the IOPs in the system. Used to inform IOPs
- * about each other's existence.
- *
- * sys_tbl_ver is the CurrentChangeIndicator that is
- * used by IOPs to track changes.
- */
-
-
-
-static s32 adpt_i2o_status_get(adpt_hba* pHba)
-{
- ulong timeout;
- u32 m;
- u32 __iomem *msg;
- u8 *status_block=NULL;
-
- if(pHba->status_block == NULL) {
- pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(i2o_status_block),
- &pHba->status_block_pa, GFP_KERNEL);
- if(pHba->status_block == NULL) {
- printk(KERN_ERR
- "dpti%d: Get Status Block failed; Out of memory. \n",
- pHba->unit);
- return -ENOMEM;
- }
- }
- memset(pHba->status_block, 0, sizeof(i2o_status_block));
- status_block = (u8*)(pHba->status_block);
- timeout = jiffies+TMOUT_GETSTATUS*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message !\n",
- pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m==EMPTY_QUEUE);
-
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
- writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
- writel(1, &msg[2]);
- writel(0, &msg[3]);
- writel(0, &msg[4]);
- writel(0, &msg[5]);
- writel( dma_low(pHba->status_block_pa), &msg[6]);
- writel( dma_high(pHba->status_block_pa), &msg[7]);
- writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- while(status_block[87]!=0xff){
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR"dpti%d: Get status timeout.\n",
- pHba->unit);
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- // Set up our number of outbound and inbound messages
- pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
- if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
- pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
- }
-
- pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
- if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
- pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
- }
-
- // Calculate the Scatter Gather list size
- if (dpt_dma64(pHba)) {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 14 * sizeof(u32))
- / (sizeof(struct sg_simple_element) + sizeof(u32)));
- } else {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 12 * sizeof(u32))
- / sizeof(struct sg_simple_element));
- }
- if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
- pHba->sg_tablesize = SG_LIST_ELEMENTS;
- }
-
-
-#ifdef DEBUG
- printk("dpti%d: State = ",pHba->unit);
- switch(pHba->status_block->iop_state) {
- case 0x01:
- printk("INIT\n");
- break;
- case 0x02:
- printk("RESET\n");
- break;
- case 0x04:
- printk("HOLD\n");
- break;
- case 0x05:
- printk("READY\n");
- break;
- case 0x08:
- printk("OPERATIONAL\n");
- break;
- case 0x10:
- printk("FAILED\n");
- break;
- case 0x11:
- printk("FAULTED\n");
- break;
- default:
- printk("%x (unknown!!)\n",pHba->status_block->iop_state);
- }
-#endif
- return 0;
-}
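
The SG-table sizing at the end of adpt_i2o_status_get() falls straight out of frame geometry: an inbound frame holds inbound_frame_size 32-bit words; after the fixed header (14 words when the 64-bit SGL prefix is used, 12 otherwise) the remainder is SG elements, and each element costs one extra word in the 64-bit case for the high half of the address. A sketch of that arithmetic; struct and function names are ours.

#include <stdint.h>

struct sg32 { uint32_t flag_count, addr_lo; }; /* 2 words per element */

int sg_tablesize(uint32_t inbound_frame_words, int dma64)
{
	uint32_t bytes = inbound_frame_words * 4;

	if (dma64) /* 14-word header; each element carries an extra address word */
		return (int)((bytes - 14 * 4) / (sizeof(struct sg32) + 4));
	/* 12-word header; plain 2-word elements */
	return (int)((bytes - 12 * 4) / sizeof(struct sg32));
}
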
-
-/*
- * Get the IOP's Logical Configuration Table
- */
-static int adpt_i2o_lct_get(adpt_hba* pHba)
-{
- u32 msg[8];
- int ret;
- u32 buf[16];
-
- if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
- pHba->lct_size = pHba->status_block->expected_lct_size;
- }
- do {
- if (pHba->lct == NULL) {
- pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->lct_size, &pHba->lct_pa,
- GFP_ATOMIC);
- if(pHba->lct == NULL) {
- printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- }
- memset(pHba->lct, 0, pHba->lct_size);
-
- msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
- msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0xFFFFFFFF; /* All devices */
- msg[5] = 0x00000000; /* Report now */
- msg[6] = 0xD0000000|pHba->lct_size;
- msg[7] = (u32)pHba->lct_pa;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
-			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
- pHba->name, ret);
- printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
- return ret;
- }
-
- if ((pHba->lct->table_size << 2) > pHba->lct_size) {
- pHba->lct_size = pHba->lct->table_size << 2;
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- pHba->lct = NULL;
- }
- } while (pHba->lct == NULL);
-
- PDEBUG("%s: Hardware resource table read.\n", pHba->name);
-
-
- // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
- pHba->FwDebugBufferSize = buf[1];
- pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
- pHba->FwDebugBufferSize);
- if (pHba->FwDebugBuffer_P) {
- pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_FLAGS_OFFSET;
- pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_BLED_OFFSET;
- pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
- pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_STR_LENGTH_OFFSET;
- pHba->FwDebugBuffer_P += buf[2];
- pHba->FwDebugFlags = 0;
- }
- }
-
- return 0;
-}
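
adpt_i2o_lct_get() above (and adpt_i2o_hrt_get() further down) both use the same grow-and-retry pattern: allocate a coherent buffer at a guessed size, ask the firmware to fill it, and if the firmware reports a larger real size, free the buffer and loop at the new size. A hedged sketch of the pattern, where fetch_table() is a stand-in for the driver's post-and-wait message exchange:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Sketch only: *needed is written back by firmware via fetch_table(). */
    static void *fetch_grown(struct device *dev, size_t guess, size_t *len,
    			 dma_addr_t *pa,
    			 int (*fetch_table)(void *va, dma_addr_t pa,
    					    size_t len, size_t *needed))
    {
    	size_t size = guess, needed;
    	void *va;

    	for (;;) {
    		va = dma_alloc_coherent(dev, size, pa, GFP_KERNEL);
    		if (!va)
    			return NULL;
    		if (fetch_table(va, *pa, size, &needed)) {
    			dma_free_coherent(dev, size, va, *pa);
    			return NULL;		/* exchange failed outright */
    		}
    		if (needed <= size) {
    			*len = size;
    			return va;		/* table fits; done */
    		}
    		dma_free_coherent(dev, size, va, *pa);
    		size = needed;			/* retry at firmware-reported size */
    	}
    }
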
-
-static int adpt_i2o_build_sys_table(void)
-{
- adpt_hba* pHba = hba_chain;
- int count = 0;
-
- if (sys_tbl)
- dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
- sys_tbl, sys_tbl_pa);
-
- sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
- (hba_count) * sizeof(struct i2o_sys_tbl_entry);
-
- sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
- sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
- if (!sys_tbl) {
- printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
- return -ENOMEM;
- }
-
- sys_tbl->num_entries = hba_count;
- sys_tbl->version = I2OVERSION;
- sys_tbl->change_ind = sys_tbl_ind++;
-
- for(pHba = hba_chain; pHba; pHba = pHba->next) {
- u64 addr;
- // Get updated Status Block so we have the latest information
- if (adpt_i2o_status_get(pHba)) {
- sys_tbl->num_entries--;
- continue; // try next one
- }
-
- sys_tbl->iops[count].org_id = pHba->status_block->org_id;
- sys_tbl->iops[count].iop_id = pHba->unit + 2;
- sys_tbl->iops[count].seg_num = 0;
- sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
- sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
- sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
- sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
- sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
- sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
- addr = pHba->base_addr_phys + 0x40;
- sys_tbl->iops[count].inbound_low = dma_low(addr);
- sys_tbl->iops[count].inbound_high = dma_high(addr);
-
- count++;
- }
-
-#ifdef DEBUG
-{
- u32 *table = (u32*)sys_tbl;
- printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
- for(count = 0; count < (sys_tbl_len >>2); count++) {
- printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
- count, table[count]);
- }
-}
-#endif
-
- return 0;
-}
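
The system-table entries above split the 64-bit inbound doorbell address into two 32-bit words via the driver-local dma_low()/dma_high() helpers; generic kernel code spells the same thing lower_32_bits()/upper_32_bits(). A minimal sketch with a hypothetical cut-down entry type:

    #include <linux/types.h>
    #include <linux/kernel.h>

    struct iop_entry {			/* hypothetical table entry */
    	u32 inbound_low;
    	u32 inbound_high;
    };

    static void set_inbound(struct iop_entry *e, u64 addr)
    {
    	e->inbound_low  = lower_32_bits(addr);	/* bits 31:0 */
    	e->inbound_high = upper_32_bits(addr);	/* bits 63:32 */
    }
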
-
-
-/*
- * Dump the information block associated with a given unit (TID)
- */
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
-{
- char buf[64];
- int unit = d->lct_data.tid;
-
- printk(KERN_INFO "TID %3.3d ", unit);
-
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Vendor: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Device: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
- {
- buf[8]=0;
- printk(" Rev: %-12.12s\n", buf);
- }
-#ifdef DEBUG
- printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
- printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
- printk(KERN_INFO "\tFlags: ");
-
- if(d->lct_data.device_flags&(1<<0))
- printk("C"); // ConfigDialog requested
- if(d->lct_data.device_flags&(1<<1))
- printk("U"); // Multi-user capable
- if(!(d->lct_data.device_flags&(1<<4)))
- printk("P"); // Peer service enabled!
- if(!(d->lct_data.device_flags&(1<<5)))
- printk("M"); // Mgmt service enabled!
- printk("\n");
-#endif
-}
-
-#ifdef DEBUG
-/*
- * Do i2o class name lookup
- */
-static const char *adpt_i2o_get_class_name(int class)
-{
- int idx = 16;
- static char *i2o_class_name[] = {
- "Executive",
- "Device Driver Module",
- "Block Device",
- "Tape Device",
- "LAN Interface",
- "WAN Interface",
- "Fibre Channel Port",
- "Fibre Channel Device",
- "SCSI Device",
- "ATE Port",
- "ATE Device",
- "Floppy Controller",
- "Floppy Device",
- "Secondary Bus Port",
- "Peer Transport Agent",
- "Peer Transport",
- "Unknown"
- };
-
- switch(class&0xFFF) {
- case I2O_CLASS_EXECUTIVE:
- idx = 0; break;
- case I2O_CLASS_DDM:
- idx = 1; break;
- case I2O_CLASS_RANDOM_BLOCK_STORAGE:
- idx = 2; break;
- case I2O_CLASS_SEQUENTIAL_STORAGE:
- idx = 3; break;
- case I2O_CLASS_LAN:
- idx = 4; break;
- case I2O_CLASS_WAN:
- idx = 5; break;
- case I2O_CLASS_FIBRE_CHANNEL_PORT:
- idx = 6; break;
- case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
- idx = 7; break;
- case I2O_CLASS_SCSI_PERIPHERAL:
- idx = 8; break;
- case I2O_CLASS_ATE_PORT:
- idx = 9; break;
- case I2O_CLASS_ATE_PERIPHERAL:
- idx = 10; break;
- case I2O_CLASS_FLOPPY_CONTROLLER:
- idx = 11; break;
- case I2O_CLASS_FLOPPY_DEVICE:
- idx = 12; break;
- case I2O_CLASS_BUS_ADAPTER_PORT:
- idx = 13; break;
- case I2O_CLASS_PEER_TRANSPORT_AGENT:
- idx = 14; break;
- case I2O_CLASS_PEER_TRANSPORT:
- idx = 15; break;
- }
- return i2o_class_name[idx];
-}
-#endif
-
-
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
-{
- u32 msg[6];
- int ret, size = sizeof(i2o_hrt);
-
- do {
- if (pHba->hrt == NULL) {
- pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
- size, &pHba->hrt_pa, GFP_KERNEL);
- if (pHba->hrt == NULL) {
- printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
- }
-
- msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
- msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
- msg[4]= (0xD0000000 | size); /* Simple transaction */
- msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
- printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
- return ret;
- }
-
- if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
- int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
- dma_free_coherent(&pHba->pDev->dev, size,
- pHba->hrt, pHba->hrt_pa);
- size = newsize;
- pHba->hrt = NULL;
- }
- } while(pHba->hrt == NULL);
- return 0;
-}
-
-/*
- * Query one scalar group value or a whole scalar group.
- */
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen)
-{
- u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
- u8 *opblk_va;
- dma_addr_t opblk_pa;
- u8 *resblk_va;
- dma_addr_t resblk_pa;
-
- int size;
-
- /* 8 bytes for header */
- resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
- if (resblk_va == NULL) {
- printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
-
- opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(opblk), &opblk_pa, GFP_KERNEL);
- if (opblk_va == NULL) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- if (field == -1) /* whole group */
- opblk[4] = -1;
-
- memcpy(opblk_va, opblk, sizeof(opblk));
- size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
- opblk_va, opblk_pa, sizeof(opblk),
- resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
- dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
- if (size == -ETIME) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
- return -ETIME;
- } else if (size == -EINTR) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
- return -EINTR;
- }
-
- memcpy(buf, resblk_va+8, buflen); /* cut off header */
-
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- if (size < 0)
- return size;
-
- return buflen;
-}
-
-
-/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
- *
- * This function can be used for all UtilParamsGet/Set operations.
- * The OperationBlock is given in opblk-buffer,
- * and results are returned in resblk-buffer.
- * Note that the minimum sized resblk is 8 bytes and contains
- * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
- */
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk_va, dma_addr_t opblk_pa, int oplen,
- void *resblk_va, dma_addr_t resblk_pa, int reslen)
-{
- u32 msg[9];
- u32 *res = (u32 *)resblk_va;
- int wait_status;
-
- msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
- msg[1] = cmd << 24 | HOST_TID << 12 | tid;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0;
- msg[5] = 0x54000000 | oplen; /* OperationBlock */
- msg[6] = (u32)opblk_pa;
- msg[7] = 0xD0000000 | reslen; /* ResultBlock */
- msg[8] = (u32)resblk_pa;
-
- if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
- printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
- return wait_status; /* -DetailedStatus */
- }
-
- if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
- printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
- "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
- pHba->name,
- (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
- : "PARAMS_GET",
- res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
- return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
- }
-
- return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
-}
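
The result header that adpt_i2o_issue_params() inspects packs three fields into one word: ErrorInfoSize in bits 31:24, BlockStatus in bits 23:16, and BlockSize (in 32-bit words) in bits 15:0. A non-zero BlockStatus becomes a negative return; otherwise the function reports the bytes consumed. A small sketch of the same decode:

    #include <linux/types.h>

    /* Decode the second word of a UtilParams result block (sketch). */
    static int decode_resblk_hdr(u32 w, u32 *bytes_used)
    {
    	u8  block_status = (w >> 16) & 0xFF;
    	u16 block_size   = w & 0xFFFF;		/* in 32-bit words */

    	if (block_status)			/* BlockStatus != SUCCESS */
    		return -(int)block_status;
    	*bytes_used = 4 + (block_size << 2);	/* header word + payload */
    	return 0;
    }
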
-
-
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
-
- /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
-
- if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
- (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
- return 0;
- }
-
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
-
- if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
- pHba->unit, -ret);
- } else {
- printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-/*
- * Enable IOP. Allows the IOP to resume external operations.
- */
-static int adpt_i2o_enable_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
- if(!pHba->status_block){
- return -ENOMEM;
- }
- /* Enable only allowed on READY state */
- if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
- return 0;
-
- if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
- return -EINVAL;
-
- msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
- pHba->name, ret);
- } else {
- PDEBUG("%s: Enabled.\n", pHba->name);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-static int adpt_i2o_systab_send(adpt_hba* pHba)
-{
- u32 msg[12];
- int ret;
-
- msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
- msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
- msg[5] = 0; /* Segment 0 */
-
- /*
- * Provide three SGL-elements:
- * System table (SysTab), Private memory space declaration and
- * Private i/o space declaration
- */
- msg[6] = 0x54000000 | sys_tbl_len;
- msg[7] = (u32)sys_tbl_pa;
- msg[8] = 0x54000000 | 0;
- msg[9] = 0;
- msg[10] = 0xD4000000 | 0;
- msg[11] = 0;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
- printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
- pHba->name, ret);
- }
-#ifdef DEBUG
- else {
- PINFO("%s: SysTab set.\n", pHba->name);
- }
-#endif
-
- return ret;
-}
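
In the message built above, each SGL word carries flag bits in the top byte and a byte count in the low 24 bits (0x54... for the table and the empty private-memory declaration, 0xD4... marking the final element). A tiny sketch of composing such a word; the macro names are hypothetical, not from the I2O headers:

    #include <linux/types.h>

    #define SGL_FLAGS_SHIFT	24		/* hypothetical layout names */
    #define SGL_LEN_MASK	0x00FFFFFFu

    static u32 sgl_word(u8 flags, u32 len)
    {
    	return ((u32)flags << SGL_FLAGS_SHIFT) | (len & SGL_LEN_MASK);
    }
    /* e.g. sgl_word(0x54, sys_tbl_len) reproduces msg[6] above */
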
-
-
-/*============================================================================
- *
- *============================================================================
- */
-
-
-#ifdef UARTDELAY
-
- static void adpt_delay(int millisec)
-{
- int i;
- for (i = 0; i < millisec; i++) {
- udelay(1000); /* delay for one millisecond */
- }
-}
-
-#endif
-
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = "dpt_i2o",
- .proc_name = "dpt_i2o",
- .show_info = adpt_show_info,
- .info = adpt_info,
- .queuecommand = adpt_queue,
- .eh_abort_handler = adpt_abort,
- .eh_device_reset_handler = adpt_device_reset,
- .eh_bus_reset_handler = adpt_bus_reset,
- .eh_host_reset_handler = adpt_reset,
- .bios_param = adpt_bios_param,
- .slave_configure = adpt_slave_configure,
- .can_queue = MAX_TO_IOP_MESSAGES,
- .this_id = 7,
-};
-
-static int __init adpt_init(void)
-{
- int error;
- adpt_hba *pHba, *next;
-
- printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-
- error = adpt_detect(&driver_template);
- if (error < 0)
- return error;
- if (hba_chain == NULL)
- return -ENODEV;
-
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- error = scsi_add_host(pHba->host, &pHba->pDev->dev);
- if (error)
- goto fail;
- scsi_scan_host(pHba->host);
- }
- return 0;
-fail:
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- scsi_remove_host(pHba->host);
- }
- return error;
-}
-
-static void __exit adpt_exit(void)
-{
- adpt_hba *pHba, *next;
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- adpt_release(pHba);
- }
-}
-
-module_init(adpt_init);
-module_exit(adpt_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
deleted file mode 100644
index 8a079e8d7f65..000000000000
--- a/drivers/scsi/dpti.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-#ifndef _DPT_H
-#define _DPT_H
-
-#define MAX_TO_IOP_MESSAGES (255)
-#define MAX_FROM_IOP_MESSAGES (255)
-
-
-/*
- * SCSI interface function Prototypes
- */
-
-static int adpt_detect(struct scsi_host_template * sht);
-static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
-static int adpt_abort(struct scsi_cmnd * cmd);
-static int adpt_reset(struct scsi_cmnd* cmd);
-static int adpt_slave_configure(struct scsi_device *);
-
-static const char *adpt_info(struct Scsi_Host *pSHost);
-static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev,
- sector_t, int geom[]);
-
-static int adpt_bus_reset(struct scsi_cmnd* cmd);
-static int adpt_device_reset(struct scsi_cmnd* cmd);
-
-
-/*
- * struct scsi_host_template (see scsi/scsi_host.h)
- */
-
-#define DPT_DRIVER_NAME "Adaptec I2O RAID"
-
-#ifndef HOSTS_C
-
-#include "dpt/sys_info.h"
-#include <linux/wait.h>
-#include "dpt/dpti_i2o.h"
-#include "dpt/dpti_ioctl.h"
-
-#define DPT_I2O_VERSION "2.4 Build 5go"
-#define DPT_VERSION 2
-#define DPT_REVISION '4'
-#define DPT_SUBREVISION '5'
-#define DPT_BETA ""
-#define DPT_MONTH 8
-#define DPT_DAY 7
-#define DPT_YEAR (2001-1980)
-
-#define DPT_DRIVER "dpt_i2o"
-#define DPTI_I2O_MAJOR (151)
-#define DPT_ORGANIZATION_ID (0x1B) /* For Private Messages */
-#define DPTI_MAX_HBA (16)
-#define MAX_CHANNEL (5) // Maximum Channel # Supported
-#define MAX_ID (128) // Maximum Target ID Supported
-
-/* Sizes in 4 byte words */
-#define REPLY_FRAME_SIZE (17)
-#define MAX_MESSAGE_SIZE (128)
-#define SG_LIST_ELEMENTS (56)
-
-#define EMPTY_QUEUE 0xffffffff
-#define I2O_INTERRUPT_PENDING_B (0x08)
-
-#define PCI_DPT_VENDOR_ID (0x1044) // DPT PCI Vendor ID
-#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
-#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
-
-/* Debugging macro from Linux Device Drivers - Rubini */
-#undef PDEBUG
-#ifdef DEBUG
-//TODO add debug level switch
-# define PDEBUG(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-# define PDEBUGV(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-#else
-# define PDEBUG(fmt, args...) /* not debugging: nothing */
-# define PDEBUGV(fmt, args...) /* not debugging: nothing */
-#endif
-
-#define PERROR(fmt, args...) printk(KERN_ERR fmt, ##args)
-#define PWARN(fmt, args...) printk(KERN_WARNING fmt, ##args)
-#define PINFO(fmt, args...) printk(KERN_INFO fmt, ##args)
-#define PCRIT(fmt, args...) printk(KERN_CRIT fmt, ##args)
-
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
-
-// Command timeouts
-#define FOREVER (0)
-#define TMOUT_INQUIRY (20)
-#define TMOUT_FLUSH (360/45)
-#define TMOUT_ABORT (30)
-#define TMOUT_SCSI (300)
-#define TMOUT_IOPRESET (360)
-#define TMOUT_GETSTATUS (15)
-#define TMOUT_INITOUTBOUND (15)
-#define TMOUT_LCT (360)
-
-
-#define I2O_SCSI_DEVICE_DSC_MASK 0x00FF
-
-#define I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION 0x000A
-
-#define I2O_SCSI_DSC_MASK 0xFF00
-#define I2O_SCSI_DSC_SUCCESS 0x0000
-#define I2O_SCSI_DSC_REQUEST_ABORTED 0x0200
-#define I2O_SCSI_DSC_UNABLE_TO_ABORT 0x0300
-#define I2O_SCSI_DSC_COMPLETE_WITH_ERROR 0x0400
-#define I2O_SCSI_DSC_ADAPTER_BUSY 0x0500
-#define I2O_SCSI_DSC_REQUEST_INVALID 0x0600
-#define I2O_SCSI_DSC_PATH_INVALID 0x0700
-#define I2O_SCSI_DSC_DEVICE_NOT_PRESENT 0x0800
-#define I2O_SCSI_DSC_UNABLE_TO_TERMINATE 0x0900
-#define I2O_SCSI_DSC_SELECTION_TIMEOUT 0x0A00
-#define I2O_SCSI_DSC_COMMAND_TIMEOUT 0x0B00
-#define I2O_SCSI_DSC_MR_MESSAGE_RECEIVED 0x0D00
-#define I2O_SCSI_DSC_SCSI_BUS_RESET 0x0E00
-#define I2O_SCSI_DSC_PARITY_ERROR_FAILURE 0x0F00
-#define I2O_SCSI_DSC_AUTOSENSE_FAILED 0x1000
-#define I2O_SCSI_DSC_NO_ADAPTER 0x1100
-#define I2O_SCSI_DSC_DATA_OVERRUN 0x1200
-#define I2O_SCSI_DSC_UNEXPECTED_BUS_FREE 0x1300
-#define I2O_SCSI_DSC_SEQUENCE_FAILURE 0x1400
-#define I2O_SCSI_DSC_REQUEST_LENGTH_ERROR 0x1500
-#define I2O_SCSI_DSC_PROVIDE_FAILURE 0x1600
-#define I2O_SCSI_DSC_BDR_MESSAGE_SENT 0x1700
-#define I2O_SCSI_DSC_REQUEST_TERMINATED 0x1800
-#define I2O_SCSI_DSC_IDE_MESSAGE_SENT 0x3300
-#define I2O_SCSI_DSC_RESOURCE_UNAVAILABLE 0x3400
-#define I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT 0x3500
-#define I2O_SCSI_DSC_MESSAGE_RECEIVED 0x3600
-#define I2O_SCSI_DSC_INVALID_CDB 0x3700
-#define I2O_SCSI_DSC_LUN_INVALID 0x3800
-#define I2O_SCSI_DSC_SCSI_TID_INVALID 0x3900
-#define I2O_SCSI_DSC_FUNCTION_UNAVAILABLE 0x3A00
-#define I2O_SCSI_DSC_NO_NEXUS 0x3B00
-#define I2O_SCSI_DSC_SCSI_IID_INVALID 0x3C00
-#define I2O_SCSI_DSC_CDB_RECEIVED 0x3D00
-#define I2O_SCSI_DSC_LUN_ALREADY_ENABLED 0x3E00
-#define I2O_SCSI_DSC_BUS_BUSY 0x3F00
-#define I2O_SCSI_DSC_QUEUE_FROZEN 0x4000
-
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-#define HBA_FLAGS_INSTALLED_B 0x00000001 // Adapter Was Installed
-#define HBA_FLAGS_BLINKLED_B 0x00000002 // Adapter In Blink LED State
-#define HBA_FLAGS_IN_RESET 0x00000040 /* in reset */
-#define HBA_HOSTRESET_FAILED 0x00000080 /* adpt_resethost failed */
-
-
-// Device state flags
-#define DPTI_DEV_ONLINE 0x00
-#define DPTI_DEV_UNSCANNED 0x01
-#define DPTI_DEV_RESET 0x02
-#define DPTI_DEV_OFFLINE 0x04
-
-
-struct adpt_device {
- struct adpt_device* next_lun;
- u32 flags;
- u32 type;
- u32 capacity;
- u32 block_size;
- u8 scsi_channel;
- u8 scsi_id;
- u64 scsi_lun;
- u8 state;
- u16 tid;
- struct i2o_device* pI2o_dev;
- struct scsi_device *pScsi_dev;
-};
-
-struct adpt_channel {
- struct adpt_device* device[MAX_ID]; /* used as an array of 128 scsi ids */
- u8 scsi_id;
- u8 type;
- u16 tid;
- u32 state;
- struct i2o_device* pI2o_dev;
-};
-
-// HBA state flags
-#define DPTI_STATE_RESET (0x01)
-
-typedef struct _adpt_hba {
- struct _adpt_hba *next;
- struct pci_dev *pDev;
- struct Scsi_Host *host;
- u32 state;
- spinlock_t state_lock;
- int unit;
- int host_no; /* SCSI host number */
- u8 initialized;
- u8 in_use; /* is the management node open*/
-
- char name[32];
- char detail[55];
-
- void __iomem *base_addr_virt;
- void __iomem *msg_addr_virt;
- ulong base_addr_phys;
- void __iomem *post_port;
- void __iomem *reply_port;
- void __iomem *irq_mask;
- u16 post_count;
- u32 post_fifo_size;
- u32 reply_fifo_size;
- u32* reply_pool;
- dma_addr_t reply_pool_pa;
- u32 sg_tablesize; // Scatter/Gather List Size.
- u8 top_scsi_channel;
- u8 top_scsi_id;
- u64 top_scsi_lun;
- u8 dma64;
-
- i2o_status_block* status_block;
- dma_addr_t status_block_pa;
- i2o_hrt* hrt;
- dma_addr_t hrt_pa;
- i2o_lct* lct;
- dma_addr_t lct_pa;
- uint lct_size;
- struct i2o_device* devices;
- struct adpt_channel channel[MAX_CHANNEL];
- struct proc_dir_entry* proc_entry; /* /proc dir */
-
- void __iomem *FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer
- u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes
- void __iomem *FwDebugStrLength_P;// Virtual Addr Of FW Debug String Len
- void __iomem *FwDebugFlags_P; // Virtual Address Of FW Debug Flags
- void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
- void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
- u32 FwDebugFlags;
- u32 *ioctl_reply_context[4];
-} adpt_hba;
-
-struct sg_simple_element {
- u32 flag_count;
- u32 addr_bus;
-};
-
-/*
- * Function Prototypes
- */
-
-static void adpt_i2o_sys_shutdown(void);
-static int adpt_init(void);
-static int adpt_i2o_build_sys_table(void);
-static irqreturn_t adpt_isr(int irq, void *dev_id);
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen);
-#ifdef DEBUG
-static const char *adpt_i2o_get_class_name(int class);
-#endif
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk, dma_addr_t opblk_pa, int oplen,
- void *resblk, dma_addr_t resblk_pa, int reslen);
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
-static int adpt_i2o_lct_get(adpt_hba* pHba);
-static int adpt_i2o_parse_lct(adpt_hba* pHba);
-static int adpt_i2o_activate_hba(adpt_hba* pHba);
-static int adpt_i2o_enable_hba(adpt_hba* pHba);
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d);
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len);
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba);
-static s32 adpt_i2o_status_get(adpt_hba* pHba);
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd);
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
-static s32 adpt_hba_reset(adpt_hba* pHba);
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
-static s32 adpt_rescan(adpt_hba* pHba);
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
-static void adpt_i2o_delete_hba(adpt_hba* pHba);
-static void adpt_inquiry(adpt_hba* pHba);
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
-static int adpt_i2o_online_hba(adpt_hba* pHba);
-static void adpt_i2o_post_wait_complete(u32, int);
-static int adpt_i2o_systab_send(adpt_hba* pHba);
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg);
-static int adpt_open(struct inode *inode, struct file *file);
-static int adpt_close(struct inode *inode, struct file *file);
-
-
-#ifdef UARTDELAY
-static void adpt_delay(int millisec);
-#endif
-
-#define PRINT_BUFFER_SIZE 512
-
-#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
-#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00010000 // Kernel Debugger Print
-#define HBA_FLAGS_DBG_FW_PRINT_B 0x00020000 // Firmware Debugger Print
-#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00040000 // Function Entry Point
-#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00080000 // Function Exit
-#define HBA_FLAGS_DBG_ERROR_B 0x00100000 // Error Conditions
-#define HBA_FLAGS_DBG_INIT_B 0x00200000 // Init Prints
-#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00400000 // OS Command Info
-#define HBA_FLAGS_DBG_SCAN_B 0x00800000 // Device Scan
-
-#define FW_DEBUG_STR_LENGTH_OFFSET 0
-#define FW_DEBUG_FLAGS_OFFSET 4
-#define FW_DEBUG_BLED_OFFSET 8
-
-#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
-#endif /* !HOSTS_C */
-#endif /* _DPT_H */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c2a59109857a..6ec296321ffc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1488,7 +1488,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
- wlen = skb->len / FCOE_WORD_TO_BYTE;
if (!lport->link_up) {
kfree_skb(skb);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 9eba910aaa86..1077110ab273 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -544,6 +544,39 @@ static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
+static int fnic_scsi_drv_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+
+ /* Configure maximum outstanding IO reqs*/
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+
+ fnic->fnic_max_tag_id = host->can_queue;
+ host->max_lun = fnic->config.luns_per_tgt;
+ host->max_id = FNIC_MAX_FCP_TARGET;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
+ host->nr_hw_queues = fnic->wq_copy_count;
+ if (host->nr_hw_queues > 1)
+ shost_printk(KERN_ERR, host,
+ "fnic: blk-mq is not supported");
+
+ host->nr_hw_queues = fnic->wq_copy_count = 1;
+
+ shost_printk(KERN_INFO, host,
+ "fnic: can_queue: %d max_lun: %llu",
+ host->can_queue, host->max_lun);
+
+ shost_printk(KERN_INFO, host,
+ "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ host->max_id, host->max_cmd_len, host->nr_hw_queues);
+
+ return 0;
+}
+
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
@@ -684,17 +717,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
- /* Configure Maximum Outstanding IO reqs*/
- if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
- host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
- max_t(u32, FNIC_MIN_IO_REQ,
- fnic->config.io_throttle_count));
- }
- fnic->fnic_max_tag_id = host->can_queue;
-
- host->max_lun = fnic->config.luns_per_tgt;
- host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ fnic_scsi_drv_init(fnic);
fnic_get_res_counts(fnic);
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 2f6c56aabe1d..7d56a236a011 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -26,8 +26,12 @@
struct gvp11_hostdata {
struct WD33C93_hostdata wh;
struct gvp11_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define TO_DMA_MASK(m) (~((unsigned long long)m & 0xffffffff))
+
static irqreturn_t gvp11_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -54,17 +58,33 @@ void gvp11_setup(char *str, int *ints)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
int bank_mask;
static int scsi_alloc_out_of_range = 0;
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
+
/* use bounce buffer if the physical address is bad */
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
if (!scsi_alloc_out_of_range) {
@@ -87,10 +107,32 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev,
+ wh->dma_bounce_buffer,
+ wh->dma_bounce_len,
+ DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ }
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
/* fall back to Chip RAM if address out of range */
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
kfree(wh->dma_bounce_buffer);
@@ -108,15 +150,19 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+ /* chip RAM can be mapped to phys. address directly */
+ addr = virt_to_phys(wh->dma_bounce_buffer);
+ /* no need to flush/invalidate cache */
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
+ /* finally, have OK mapping (punted for PIO else) */
+ scsi_pointer->dma_handle = addr;
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
- scsi_pointer->this_residual);
- }
}
/* setup dma direction */
@@ -129,13 +175,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
if (bank_mask)
@@ -161,6 +201,11 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* remove write bit from CONTROL bits */
regs->CNTR = GVP11_DMAC_INT_ENABLE;
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir && SCpnt)
@@ -287,6 +332,13 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
default_dma_xfer_mask = ent->driver_data;
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(default_dma_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(default_dma_xfer_mask));
+ return -ENODEV;
+ }
+
/*
* Rumors state that some GVP ram boards use the same product
* code as the SCSI controllers. Therefore if the board-size
@@ -327,9 +379,16 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
- if (gvp11_xfer_mask)
+ if (gvp11_xfer_mask) {
hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
- else
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(gvp11_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(gvp11_xfer_mask));
+ error = -ENODEV;
+ goto fail_check_or_alloc;
+ }
+ } else
hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
hdata->wh.no_sync = 0xff;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 764e859d0106..33af5b8dede2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -219,10 +219,15 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
if (!sas_protocol_ata(task->task_proto)) {
- if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ if (slot->n_elem) {
+ if (task->task_proto & SAS_PROTOCOL_SSP)
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ else
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ }
if (slot->n_elem_dif) {
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
@@ -269,28 +274,23 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
- struct sas_task *task, int n_elem,
- int n_elem_req)
+ struct sas_task *task, int n_elem)
{
struct device *dev = hisi_hba->dev;
- if (!sas_protocol_ata(task->task_proto)) {
+ if (!sas_protocol_ata(task->task_proto) && n_elem) {
if (task->num_scatter) {
- if (n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(dev, task->scatter, task->num_scatter,
+ task->data_dir);
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- if (n_elem_req)
- dma_unmap_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
}
}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
- struct sas_task *task, int *n_elem,
- int *n_elem_req)
+ struct sas_task *task, int *n_elem)
{
struct device *dev = hisi_hba->dev;
int rc;
@@ -308,9 +308,9 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
goto prep_out;
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
- if (!*n_elem_req) {
+ *n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (!*n_elem) {
rc = -ENOMEM;
goto prep_out;
}
@@ -332,8 +332,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
err_out_dma_unmap:
/* It would be better to call dma_unmap_sg() here, but it's messy */
- hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
- *n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
return rc;
}
@@ -457,7 +456,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
+ int n_elem = 0, n_elem_dif = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -568,8 +567,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
return -EINVAL;
}
- rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req);
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
if (rc < 0)
goto prep_out;
@@ -605,8 +603,7 @@ err_out_dif_dma_unmap:
if (!sas_protocol_ata(task->task_proto))
hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
- hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem);
prep_out:
dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
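
The hisi_sas change collapses the separate n_elem/n_elem_req bookkeeping into a single count, because a task maps either its data scatterlist (SSP) or the one-entry smp_task.smp_req scatterlist (SMP), never both. A sketch of the unified shape, with hypothetical parameters:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Sketch: one element count covers both the SSP and SMP cases. */
    static int map_task_sg(struct device *dev, struct scatterlist *data_sg,
    		       int data_nents, struct scatterlist *smp_req,
    		       enum dma_data_direction dir, int *n_elem)
    {
    	if (data_nents) {			/* SSP: whole scatterlist */
    		*n_elem = dma_map_sg(dev, data_sg, data_nents, dir);
    		if (!*n_elem)
    			return -ENOMEM;
    	} else if (smp_req) {			/* SMP: single request entry */
    		*n_elem = dma_map_sg(dev, smp_req, 1, DMA_TO_DEVICE);
    		if (!*n_elem)
    			return -ENOMEM;
    	}
    	return 0;
    }
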
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 4582791def32..349546bacb2b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1282,8 +1282,6 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 455d49299ddf..70e401fd432a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -805,8 +805,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
return -SAS_QUEUE_FULL;
}
/*
- * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
- */
+ * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
+ */
if (sata_dev ^ (start & 1))
break;
start++;
@@ -2428,8 +2428,6 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index eb86afb21aab..efe8c5be5870 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -481,6 +481,9 @@ struct hisi_sas_err_record_v3 {
#define RX_DATA_LEN_UNDERFLOW_OFF 6
#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+#define RX_FIS_STATUS_ERR_OFF 0
+#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF)
+
#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32
@@ -2161,6 +2164,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_status_buf_addr_mem(slot);
u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+ u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);
switch (task->task_proto) {
@@ -2188,7 +2192,10 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+ ts->stat = SAS_PROTO_RESPONSE;
+ } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
@@ -2311,8 +2318,6 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -2778,16 +2783,13 @@ static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
static int slave_configure_v3_hw(struct scsi_device *sdev)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
- struct domain_device *ddev = sdev_to_domain_dev(sdev);
struct hisi_hba *hisi_hba = shost_priv(shost);
+ int ret = hisi_sas_slave_configure(sdev);
struct device *dev = hisi_hba->dev;
- int ret = sas_slave_configure(sdev);
unsigned int max_sectors;
if (ret)
return ret;
- if (!dev_is_sata(ddev))
- sas_change_queue_depth(sdev, 64);
if (sdev->type == TYPE_ENCLOSURE)
return 0;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 315c7ac730e9..0738238ed6cc 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -190,6 +190,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
+
+ /*
+ * After scsi_remove_host() has returned the scsi LLD module can be
+ * unloaded and/or the host resources can be released. Hence wait until
+ * the dependent SCSI targets and devices are gone before returning.
+ */
+ wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
+
+ scsi_mq_destroy_tags(shost);
}
EXPORT_SYMBOL(scsi_remove_host);
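
scsi_remove_host() now blocks until the host's target count reaches zero, so an LLD module cannot be unloaded while targets still reference it. The underlying pattern is an atomic counter paired with a waitqueue: the release side decrements and wakes, the teardown side sleeps in wait_event() until zero. A sketch (assumes the counter and waitqueue were initialised elsewhere):

    #include <linux/wait.h>
    #include <linux/atomic.h>

    struct owner {				/* hypothetical host-like object */
    	atomic_t		users;
    	wait_queue_head_t	users_wq;	/* init_waitqueue_head() at setup */
    };

    static void owner_put(struct owner *o)
    {
    	if (atomic_dec_and_test(&o->users))
    		wake_up(&o->users_wq);	/* last user gone: wake teardown */
    }

    static void owner_teardown(struct owner *o)
    {
    	/* sleep until every outstanding user has dropped its reference */
    	wait_event(o->users_wq, atomic_read(&o->users) == 0);
    }
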
@@ -236,6 +245,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
shost->dma_dev = dma_dev;
+ if (dma_dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
error = scsi_mq_setup_tags(shost);
if (error)
goto fail;
@@ -295,8 +309,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
return error;
/*
- * Any host allocation in this function will be freed in
- * scsi_host_dev_release().
+ * Any resources associated with the SCSI host in this function except
+ * the tag set will be freed by scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
@@ -312,6 +326,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
+ scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -345,12 +360,9 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- if (shost->tag_set.tags)
- scsi_mq_destroy_tags(shost);
-
kfree(shost->shost_data);
- ida_simple_remove(&host_index_ida, shost->host_no);
+ ida_free(&host_index_ida, shost->host_no);
if (shost->shost_state != SHOST_CREATED)
put_device(parent);
@@ -394,8 +406,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_waitqueue_head(&shost->targets_wq);
- index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
kfree(shost);
return NULL;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9fee70d6434a..29b1bd755afe 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -52,6 +52,10 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+static bool iscsi_recv_from_iscsi_q;
+module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
+MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");
+
static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
S_IRUGO | S_IWUSR);
@@ -122,20 +126,13 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
return 0;
}
-static void iscsi_sw_tcp_data_ready(struct sock *sk)
+static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
- struct iscsi_conn *conn;
- struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
read_descriptor_t rd_desc;
- read_lock_bh(&sk->sk_callback_lock);
- conn = sk->sk_user_data;
- if (!conn) {
- read_unlock_bh(&sk->sk_callback_lock);
- return;
- }
- tcp_conn = conn->dd_data;
-
/*
* Use rd_desc to pass 'conn' to iscsi_tcp_recv.
* We set count to 1 because we want the network layer to
@@ -144,13 +141,48 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
*/
rd_desc.arg.data = conn;
rd_desc.count = 1;
- tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
- iscsi_sw_sk_state_check(sk);
+ tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
/* If we had to (atomically) map a highmem page,
* unmap it now. */
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+
+ iscsi_sw_sk_state_check(sk);
+}
+
+static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
+ recvwork);
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
+
+ lock_sock(sk);
+ iscsi_sw_tcp_recv_data(conn);
+ release_sock(sk);
+}
+
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_conn *conn;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ if (tcp_sw_conn->queue_recv)
+ iscsi_conn_queue_recv(conn);
+ else
+ iscsi_sw_tcp_recv_data(conn);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -205,7 +237,7 @@ static void iscsi_sw_tcp_write_space(struct sock *sk)
old_write_space(sk);
ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
@@ -274,7 +306,10 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
copy = segment->size - offset;
if (segment->total_copied + segment->size < segment->total_size)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+
+ if (tcp_sw_conn->queue_recv)
+ flags |= MSG_DONTWAIT;
/* Use sendpage if we can; else fall back to sendmsg */
if (!segment->data) {
@@ -557,6 +592,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
+ INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
+ tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
@@ -610,6 +647,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
+ iscsi_suspend_rx(conn);
+
spin_lock_bh(&session->frwd_lock);
tcp_sw_conn->sock = NULL;
spin_unlock_bh(&session->frwd_lock);
@@ -898,7 +937,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
remove_session:
iscsi_session_teardown(cls_session);
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
@@ -915,7 +954,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -1003,7 +1042,6 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
.slave_configure = iscsi_sw_tcp_slave_configure,
- .target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
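
With recv_from_iscsi_q set, the sk_data_ready callback above no longer reads the socket in softirq context: it only queues a work item, and the work function takes the socket lock in process context and performs the actual tcp_read_sock() pass. A reduced sketch of that deferral pattern (names hypothetical; the driver's callback_lock handling and per-host workqueue are elided, schedule_work() standing in for queue_work() on ihost->workq):

    #include <linux/workqueue.h>
    #include <net/sock.h>

    struct conn_ctx {
    	struct sock		*sk;
    	struct work_struct	recvwork;
    	bool			queue_recv;
    };

    static void conn_do_recv(struct conn_ctx *c)
    {
    	/* the tcp_read_sock() pass would live here; elided in this sketch */
    }

    static void conn_recv_work(struct work_struct *work)
    {
    	struct conn_ctx *c = container_of(work, struct conn_ctx, recvwork);

    	lock_sock(c->sk);	/* process context: may sleep, owns socket */
    	conn_do_recv(c);
    	release_sock(c->sk);
    }

    static void conn_data_ready(struct sock *sk)
    {
    	struct conn_ctx *c = sk->sk_user_data;

    	if (c->queue_recv)
    		schedule_work(&c->recvwork);	/* defer out of softirq */
    	else
    		conn_do_recv(c);		/* legacy: read inline */
    }
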
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 791453195099..850a018aefb9 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,8 @@ struct iscsi_sw_tcp_send {
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ struct work_struct recvwork;
+ bool queue_recv;
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 797abf4f5399..d95f4bcdeb2e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -83,7 +83,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
"%s " dbg_fmt, __func__, ##arg); \
} while (0);
-inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
+#define ISCSI_CMD_COMPL_WAIT 5
+
+inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
@@ -91,7 +93,17 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
if (ihost->workq)
queue_work(ihost->workq, &conn->xmitwork);
}
-EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);
+
+inline void iscsi_conn_queue_recv(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
+ queue_work(ihost->workq, &conn->recvwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv);
static void __iscsi_update_cmdsn(struct iscsi_session *session,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -472,12 +484,18 @@ static void iscsi_free_task(struct iscsi_task *task)
}
}
-void __iscsi_get_task(struct iscsi_task *task)
+bool iscsi_get_task(struct iscsi_task *task)
{
- refcount_inc(&task->refcount);
+ return refcount_inc_not_zero(&task->refcount);
}
-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+EXPORT_SYMBOL_GPL(iscsi_get_task);
+/**
+ * __iscsi_put_task - drop the refcount on a task
+ * @task: iscsi_task to drop the refcount on
+ *
+ * The back_lock must be held when calling in case it frees the task.
+ */
void __iscsi_put_task(struct iscsi_task *task)
{
if (refcount_dec_and_test(&task->refcount))
@@ -489,10 +507,11 @@ void iscsi_put_task(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
- /* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ if (refcount_dec_and_test(&task->refcount)) {
+ spin_lock_bh(&session->back_lock);
+ iscsi_free_task(task);
+ spin_unlock_bh(&session->back_lock);
+ }
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
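
Switching the task "get" from refcount_inc() to refcount_inc_not_zero() makes the get fail once the count has hit zero, so a racing completion can no longer resurrect a task that is already being freed; iscsi_put_task() then only takes back_lock when it performs the final put. A sketch of the general lookup-side pattern:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct obj {
    	refcount_t refcnt;
    	/* ... payload ... */
    };

    /* Fails (returns false) if the object is already on its way to free. */
    static bool obj_get(struct obj *o)
    {
    	return refcount_inc_not_zero(&o->refcnt);
    }

    static void obj_put(struct obj *o)
    {
    	if (refcount_dec_and_test(&o->refcnt))
    		kfree(o);	/* last reference: safe to free */
    }
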
@@ -557,16 +576,19 @@ static bool cleanup_queued_task(struct iscsi_task *task)
struct iscsi_conn *conn = task->conn;
bool early_complete = false;
- /* Bad target might have completed task while it was still running */
+ /*
+ * We might have raced where we handled a R2T early and got a response
+ * but have not yet taken the task off the requeue list, then a TMF or
+ * recovery happened and so we can still see it here.
+ */
if (task->state == ISCSI_TASK_COMPLETED)
early_complete = true;
if (!list_empty(&task->running)) {
list_del_init(&task->running);
/*
- * If it's on a list but still running, this could be from
- * a bad target sending a rsp early, cleanup from a TMF, or
- * session recovery.
+ * If it's on a list but still running this could be cleanup
+ * from a TMF or session recovery.
*/
if (task->state == ISCSI_TASK_RUNNING ||
task->state == ISCSI_TASK_COMPLETED)
@@ -587,20 +609,17 @@ static bool cleanup_queued_task(struct iscsi_task *task)
}
/*
- * session frwd lock must be held and if not called for a task that is still
- * pending or from the xmit thread, then xmit thread must be suspended
+ * session back and frwd lock must be held and if not called for a task that
+ * is still pending or from the xmit thread, then xmit thread must be suspended
*/
-static void fail_scsi_task(struct iscsi_task *task, int err)
+static void __fail_scsi_task(struct iscsi_task *task, int err)
{
struct iscsi_conn *conn = task->conn;
struct scsi_cmnd *sc;
int state;
- spin_lock_bh(&conn->session->back_lock);
- if (cleanup_queued_task(task)) {
- spin_unlock_bh(&conn->session->back_lock);
+ if (cleanup_queued_task(task))
return;
- }
if (task->state == ISCSI_TASK_PENDING) {
/*
@@ -619,7 +638,15 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
sc->result = err << 16;
scsi_set_resid(sc, scsi_bufflen(sc));
iscsi_complete_task(task, state);
- spin_unlock_bh(&conn->session->back_lock);
+}
+
+static void fail_scsi_task(struct iscsi_task *task, int err)
+{
+ struct iscsi_session *session = task->conn->session;
+
+ spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, err);
+ spin_unlock_bh(&session->back_lock);
}
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -668,12 +695,18 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
return 0;
}
+/**
+ * iscsi_alloc_mgmt_task - allocate and setup a mgmt task.
+ * @conn: iscsi conn that the task will be sent on.
+ * @hdr: iscsi pdu that will be sent.
+ * @data: buffer for data segment if needed.
+ * @data_size: length of data in bytes.
+ */
static struct iscsi_task *
-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_host *ihost = shost_priv(session->host);
uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
struct iscsi_task *task;
itt_t itt;
@@ -754,28 +787,57 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
- if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
- WRITE_ONCE(conn->ping_task, task);
+ return task;
+
+free_task:
+ iscsi_put_task(task);
+ return NULL;
+}
+
+/**
+ * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task.
+ * @task: iscsi task to send.
+ *
+ * On failure this returns a non-zero error code, and the driver must free
+ * the task with iscsi_put_task;
+ */
+static int iscsi_send_mgmt_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_host *ihost = shost_priv(conn->session->host);
+ int rc = 0;
if (!ihost->workq) {
- if (iscsi_prep_mgmt_task(conn, task))
- goto free_task;
+ rc = iscsi_prep_mgmt_task(conn, task);
+ if (rc)
+ return rc;
- if (session->tt->xmit_task(task))
- goto free_task;
+ rc = session->tt->xmit_task(task);
+ if (rc)
+ return rc;
} else {
list_add_tail(&task->running, &conn->mgmtqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
- return task;
+ return 0;
+}
-free_task:
- /* regular RX path uses back_lock */
- spin_lock(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock(&session->back_lock);
- return NULL;
+static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_task *task;
+ int rc;
+
+ task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
+ if (!task)
+ return -ENOMEM;
+
+ rc = iscsi_send_mgmt_task(task);
+ if (rc)
+ iscsi_put_task(task);
+ return rc;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -786,7 +848,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
int err = 0;
spin_lock_bh(&session->frwd_lock);
- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ if (__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
spin_unlock_bh(&session->frwd_lock);
return err;
@@ -959,7 +1021,6 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
if (!rhdr) {
if (READ_ONCE(conn->ping_task))
return -EINVAL;
- WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
}
memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -973,10 +1034,18 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
} else
hdr.ttt = RESERVED_ITT;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
- if (!task) {
+ task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!task)
+ return -ENOMEM;
+
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, task);
+
+ if (iscsi_send_mgmt_task(task)) {
if (!rhdr)
WRITE_ONCE(conn->ping_task, NULL);
+ iscsi_put_task(task);
+
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
@@ -1434,11 +1503,17 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
{
int rc;
- spin_lock_bh(&conn->session->back_lock);
-
if (!conn->task) {
- /* Take a ref so we can access it after xmit_task() */
- __iscsi_get_task(task);
+ /*
+ * Take a ref so we can access it after xmit_task().
+ *
+ * This should never fail because the failure paths will have
+ * stopped the xmit thread.
+ */
+ if (!iscsi_get_task(task)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
} else {
/* Already have a ref from when we failed to send it last call */
conn->task = NULL;
@@ -1449,7 +1524,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* case a bad target sends a cmd rsp before we have handled the task.
*/
if (was_requeue)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
/*
* Do this after dropping the extra ref because if this was a requeue
@@ -1461,10 +1536,8 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* task and get woken up again.
*/
conn->task = task;
- spin_unlock_bh(&conn->session->back_lock);
return -ENODATA;
}
- spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
@@ -1472,20 +1545,16 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
- }
- /* regular RX path uses back_lock */
- spin_lock(&conn->session->back_lock);
- if (rc && task->state == ISCSI_TASK_RUNNING) {
+ } else {
/*
* get an extra ref that is released next time we access it
* as conn->task above.
*/
- __iscsi_get_task(task);
+ iscsi_get_task(task);
conn->task = task;
}
- __iscsi_put_task(task);
- spin_unlock(&conn->session->back_lock);
+ iscsi_put_task(task);
return rc;
}
@@ -1513,7 +1582,7 @@ void iscsi_requeue_task(struct iscsi_task *task)
*/
iscsi_put_task(task);
}
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1567,6 +1636,28 @@ check_mgmt:
goto done;
}
+check_requeue:
+ while (!list_empty(&conn->requeue)) {
+ /*
+ * we always do fastlogout - conn stop code will clean up.
+ */
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+ task = list_entry(conn->requeue.next, struct iscsi_task,
+ running);
+
+ if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
+ break;
+
+ list_del_init(&task->running);
+ rc = iscsi_xmit_task(conn, task, true);
+ if (rc)
+ goto done;
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+
/* process pending command queue */
while (!list_empty(&conn->cmdqueue)) {
task = list_entry(conn->cmdqueue.next, struct iscsi_task,
@@ -1594,28 +1685,10 @@ check_mgmt:
*/
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
+ if (!list_empty(&conn->requeue))
+ goto check_requeue;
}
- while (!list_empty(&conn->requeue)) {
- /*
- * we always do fastlogout - conn stop code will clean up.
- */
- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
- break;
-
- task = list_entry(conn->requeue.next, struct iscsi_task,
- running);
-
- if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
- break;
-
- list_del_init(&task->running);
- rc = iscsi_xmit_task(conn, task, true);
- if (rc)
- goto done;
- if (!list_empty(&conn->mgmtqueue))
- goto check_mgmt;
- }
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1782,7 +1855,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
}
} else {
list_add_tail(&task->running, &conn->cmdqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
session->queued_cmdsn++;
@@ -1843,11 +1916,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
__must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
- struct iscsi_task *task;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (!task) {
+ if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) {
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -1895,6 +1965,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
struct iscsi_task *task;
int i;
+restart_cmd_loop:
spin_lock_bh(&session->back_lock);
for (i = 0; i < session->cmds_max; i++) {
task = session->cmds[i];
@@ -1903,22 +1974,25 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
if (lun != -1 && lun != task->sc->device->lun)
continue;
-
- __iscsi_get_task(task);
- spin_unlock_bh(&session->back_lock);
+ /*
+	 * The cmd is completing, but if this is called from an eh
+	 * callout path, then when we return, scsi-ml owns the cmd. Wait
+	 * for the completion path to finish freeing the cmd.
+ */
+ if (!iscsi_get_task(task)) {
+ spin_unlock_bh(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ spin_lock_bh(&session->frwd_lock);
+ goto restart_cmd_loop;
+ }
ISCSI_DBG_SESSION(session,
"failing sc %p itt 0x%x state %d\n",
task->sc, task->itt, task->state);
- fail_scsi_task(task, error);
-
- spin_unlock_bh(&session->frwd_lock);
- iscsi_put_task(task);
- spin_lock_bh(&session->frwd_lock);
-
- spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, error);
+ __iscsi_put_task(task);
}
-
spin_unlock_bh(&session->back_lock);
}
@@ -1943,7 +2017,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
/**
* iscsi_suspend_tx - suspend iscsi_data_xmit
- * @conn: iscsi conn tp stop processing IO on.
+ * @conn: iscsi conn to stop processing IO on.
*
* This function sets the suspend bit to prevent iscsi_data_xmit
* from sending new IO, and if work is queued on the xmit thread
@@ -1956,15 +2030,30 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
- flush_workqueue(ihost->workq);
+ flush_work(&conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
+}
+
+/**
+ * iscsi_suspend_rx - Prevent recvwork from running again.
+ * @conn: iscsi conn to stop.
+ */
+void iscsi_suspend_rx(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ if (ihost->workq)
+ flush_work(&conn->recvwork);
}
+EXPORT_SYMBOL_GPL(iscsi_suspend_rx);
/*
* We want to make sure a ping is in flight. It has timed out.
@@ -2008,7 +2097,16 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
spin_unlock(&session->back_lock);
goto done;
}
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * Racing with the completion path right now, so give it more
+ * time so that path can complete it like normal.
+ */
+ rc = BLK_EH_RESET_TIMER;
+ task = NULL;
+ spin_unlock(&session->back_lock);
+ goto done;
+ }
spin_unlock(&session->back_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -2257,6 +2355,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
+completion_check:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
/*
@@ -2296,13 +2395,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ goto completion_check;
+ }
+
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
conn = session->leadconn;
iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
-
- ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
- __iscsi_get_task(task);
spin_unlock(&session->back_lock);
if (task->state == ISCSI_TASK_PENDING) {
@@ -2828,11 +2934,12 @@ static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
+ * @is_shutdown: true if called from a driver shutdown callout
*
* If there are any sessions left, this will initiate the removal and wait
* for the completion.
*/
-void iscsi_host_remove(struct Scsi_Host *shost)
+void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown)
{
struct iscsi_host *ihost = shost_priv(shost);
unsigned long flags;
@@ -2841,7 +2948,11 @@ void iscsi_host_remove(struct Scsi_Host *shost)
ihost->state = ISCSI_HOST_REMOVED;
spin_unlock_irqrestore(&ihost->lock, flags);
- iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ if (!is_shutdown)
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ else
+ iscsi_host_for_each_session(shost, iscsi_force_destroy_session);
+
wait_event_interruptible(ihost->session_removal_wq,
ihost->num_sessions == 0);
if (signal_pending(current))
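The libiscsi rework above turns the task get into a fallible operation: once the task's refcount has dropped to zero, iscsi_get_task() fails and the caller must back off (retry, reset the timer, or let the completion path win) instead of touching memory that is already being freed. A minimal sketch of the pattern using the kernel refcount API; the struct and helper names are illustrative, not the libiscsi internals:

#include <linux/refcount.h>
#include <linux/slab.h>

struct task_obj {
	refcount_t refcount;
	/* ... payload ... */
};

/* Fails (returns false) once the final put has begun freeing. */
static bool task_get(struct task_obj *t)
{
	return refcount_inc_not_zero(&t->refcount);
}

static void task_put(struct task_obj *t)
{
	if (refcount_dec_and_test(&t->refcount))
		kfree(t);
}

static int task_use(struct task_obj *t)
{
	if (!task_get(t))
		return -EAGAIN;	/* lost the race; completion owns the free */
	/* ... t is safe to dereference until the matching put ... */
	task_put(t);
	return 0;
}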
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 883005757ddb..c182aa83f2c9 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -558,7 +558,11 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
return 0;
}
task->last_xfer = jiffies;
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ /* Let the path that got the early rsp complete it */
+ return 0;
+ }
tcp_conn = conn->dd_data;
rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 260e735d06fa..fa2209080cc2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -175,13 +175,13 @@ static enum sas_device_type to_dev_type(struct discover_resp *dr)
return dr->attached_dev_type;
}
-static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ struct smp_disc_resp *disc_resp)
{
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
- struct smp_resp *resp = rsp;
- struct discover_resp *dr = &resp->disc;
+ struct discover_resp *dr = &disc_resp->disc;
struct sas_ha_struct *ha = dev->port->ha;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
@@ -198,7 +198,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
BUG_ON(!phy->phy);
}
- switch (resp->result) {
+ switch (disc_resp->result) {
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
break;
@@ -347,12 +347,13 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
}
#define DISCOVER_REQ_SIZE 16
-#define DISCOVER_RESP_SIZE 56
+#define DISCOVER_RESP_SIZE sizeof(struct smp_disc_resp)
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
- u8 *disc_resp, int single)
+ struct smp_disc_resp *disc_resp,
+ int single)
{
- struct discover_resp *dr;
+ struct discover_resp *dr = &disc_resp->disc;
int res;
disc_req[9] = single;
@@ -361,7 +362,6 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
- dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
pr_notice("Found loopback topology, just ignore it!\n");
return 0;
@@ -375,7 +375,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
struct expander_device *ex = &dev->ex_dev;
int res = 0;
u8 *disc_req;
- u8 *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
if (!disc_req)
@@ -429,27 +429,14 @@ static int sas_expander_discover(struct domain_device *dev)
#define MAX_EXPANDER_PHYS 128
-static void ex_assign_report_general(struct domain_device *dev,
- struct smp_resp *resp)
-{
- struct report_general_resp *rg = &resp->rg;
-
- dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
- dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
- dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
- dev->ex_dev.t2t_supp = rg->t2t_supp;
- dev->ex_dev.conf_route_table = rg->conf_route_table;
- dev->ex_dev.configuring = rg->configuring;
- memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
-}
-
#define RG_REQ_SIZE 8
-#define RG_RESP_SIZE 32
+#define RG_RESP_SIZE sizeof(struct smp_rg_resp)
static int sas_ex_general(struct domain_device *dev)
{
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
+ struct report_general_resp *rg;
int res;
int i;
@@ -480,7 +467,15 @@ static int sas_ex_general(struct domain_device *dev)
goto out;
}
- ex_assign_report_general(dev, rg_resp);
+ rg = &rg_resp->rg;
+ dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
+ dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
+ dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+ dev->ex_dev.t2t_supp = rg->t2t_supp;
+ dev->ex_dev.conf_route_table = rg->conf_route_table;
+ dev->ex_dev.configuring = rg->configuring;
+ memcpy(dev->ex_dev.enclosure_logical_id,
+ rg->enclosure_logical_id, 8);
if (dev->ex_dev.configuring) {
pr_debug("RG: ex %016llx self-configuring...\n",
@@ -681,10 +676,10 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
#ifdef CONFIG_SCSI_SAS_ATA
#define RPS_REQ_SIZE 16
-#define RPS_RESP_SIZE 60
+#define RPS_RESP_SIZE sizeof(struct smp_rps_resp)
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp)
+ struct smp_rps_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -1657,7 +1652,7 @@ out_err:
/* ---------- Domain revalidation ---------- */
static int sas_get_phy_discover(struct domain_device *dev,
- int phy_id, struct smp_resp *disc_resp)
+ int phy_id, struct smp_disc_resp *disc_resp)
{
int res;
u8 *disc_req;
@@ -1673,10 +1668,8 @@ static int sas_get_phy_discover(struct domain_device *dev,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
goto out;
- else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
res = disc_resp->result;
- goto out;
- }
out:
kfree(disc_req);
return res;
@@ -1686,7 +1679,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
int phy_id, int *pcc)
{
int res;
- struct smp_resp *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
@@ -1704,19 +1697,17 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type)
{
int res;
- struct smp_resp *disc_resp;
- struct discover_resp *dr;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
return -ENOMEM;
- dr = &disc_resp->disc;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (res == 0) {
memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
SAS_ADDR_SIZE);
- *type = to_dev_type(dr);
+ *type = to_dev_type(&disc_resp->disc);
if (*type == 0)
memset(sas_addr, 0, SAS_ADDR_SIZE);
}
@@ -1760,7 +1751,7 @@ static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
{
int res;
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
rg_req = alloc_smp_req(RG_REQ_SIZE);
if (!rg_req)
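The sas_expander.c hunks retire the catch-all struct smp_resp union in favor of per-function response types (smp_disc_resp, smp_rg_resp, smp_rps_resp), with response sizes derived by sizeof() instead of the hand-maintained constants 56, 32, and 60. A hedged sketch of the shape, with invented field names:

#include <linux/types.h>

/* One dedicated response struct per SMP function, not a shared union. */
struct smp_disc_resp_sketch {
	u8 frame_type;
	u8 function;
	u8 result;	/* callers check this field directly */
	u8 reserved;
	/* ... DISCOVER-specific payload ... */
} __packed;

/* The buffer size now tracks the struct definition automatically. */
#define DISC_RESP_SIZE_SKETCH	sizeof(struct smp_disc_resp_sketch)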
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index dc35f0f8eae3..e4f77072a58d 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -531,6 +531,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->reset_result = 0;
@@ -544,6 +545,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (rc == 0)
rc = d->reset_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
@@ -558,6 +560,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->enable_result = 0;
@@ -571,6 +574,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (rc == 0)
rc = d->enable_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
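queue_phy_reset() and queue_phy_enable() now hold a runtime-PM reference across the whole queued operation so the HBA cannot runtime-suspend while the libsas event is in flight. The diff takes the reference unconditionally; a sketch that also checks the return might look like the following (note that pm_runtime_get_sync() bumps the usage count even on failure, which is why the newer pm_runtime_resume_and_get() helper is often preferred):

#include <linux/pm_runtime.h>

static int sketch_queued_phy_op(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);	/* resume + take a usage ref */
	if (rc < 0) {
		pm_runtime_put_sync(dev);	/* count was bumped anyway */
		return rc;
	}

	/* ... queue the event and wait for the result ... */
	rc = 0;

	pm_runtime_put_sync(dev);	/* balance the reference */
	return rc;
}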
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 13d0ffaada93..8d0ad3abc7b5 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -83,7 +83,7 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
int sas_ex_phy_discover(struct domain_device *dev, int single);
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp);
+ struct smp_rps_resp *rps_resp);
int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index da9070cdad91..e6a083d098a1 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -48,9 +48,6 @@ struct lpfc_sli2_slim;
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
-#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
- cmnd for menlo needs nearly twice as for firmware
- downloads using bsg */
#define LPFC_DEFAULT_XPSGL_SIZE 256
#define LPFC_MAX_SG_TABLESIZE 0xffff
@@ -604,7 +601,6 @@ struct lpfc_vport {
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
-#define FC_RSCN_MEMENTO 0x4000000/* RSCN cmd processed */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -987,7 +983,8 @@ struct lpfc_hba {
u8 last_seq, u8 cr_cx_cmd);
void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag,
- u8 ulp_class, u16 cqid, bool ia);
+ u8 ulp_class, u16 cqid, bool ia,
+ bool wqec);
/* expedite pool */
struct lpfc_epd_pool epd_pool;
@@ -1439,8 +1436,6 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- int wait_4_mlo_maint_flg;
- wait_queue_head_t wait_4_mlo_m_q;
/* data structure used for latency data collection */
#define LPFC_NO_BUCKET 0
#define LPFC_LINEAR_BUCKET 1
@@ -1475,8 +1470,6 @@ struct lpfc_hba {
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
- uint8_t menlo_flag; /* menlo generic flags */
-#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3caaa7c4af48..09cf2cd0ae60 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -922,25 +922,6 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the Menlo Maintenance sli flag.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- (phba->sli.sli_flag & LPFC_MENLO_MAINT));
-}
-
-/**
* lpfc_vportnum_show - Return the port number in ascii of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -1109,10 +1090,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Menlo Maint Mode\n");
- else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -2827,7 +2805,6 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
-static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR_RO(lpfc_drvr_version);
static DEVICE_ATTR_RO(lpfc_enable_fip);
@@ -6220,7 +6197,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_option_rom_version.attr,
&dev_attr_link_state.attr,
&dev_attr_num_discovered_ports.attr,
- &dev_attr_menlo_mgmt_mode.attr,
&dev_attr_lpfc_drvr_version.attr,
&dev_attr_lpfc_enable_fip.attr,
&dev_attr_lpfc_temp_sensor.attr,
@@ -7396,7 +7372,6 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
case PCI_DEVICE_ID_LANCER_FCOE:
case PCI_DEVICE_ID_LANCER_FCOE_VF:
case PCI_DEVICE_ID_ZEPHYR_DCSP:
- case PCI_DEVICE_ID_HORNET:
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 676e7d54b97a..9be3bb01a8ec 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -88,17 +88,9 @@ struct lpfc_bsg_mbox {
uint32_t outExtWLen; /* from app */
};
-#define MENLO_DID 0x0000FC0E
-
-struct lpfc_bsg_menlo {
- struct lpfc_iocbq *cmdiocbq;
- struct lpfc_dmabuf *rmp;
-};
-
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
-#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
struct bsg_job *set_job; /* job waiting for this iocb to finish */
@@ -106,7 +98,6 @@ struct bsg_job_data {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
- struct lpfc_bsg_menlo menlo;
} context_un;
};
@@ -3502,15 +3493,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
- if ((mb->un.varWords[0] == SETVAR_MLOMNT)
- && (mb->un.varWords[1] == 1)) {
- phba->wait_4_mlo_maint_flg = 1;
- } else if (mb->un.varWords[0] == SETVAR_MLORST) {
- spin_lock_irq(&phba->hbalock);
- phba->link_flag &= ~LS_LOOPBACK_MODE;
- spin_unlock_irq(&phba->hbalock);
- phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
- }
break;
case MBX_READ_SPARM64:
case MBX_REG_LOGIN:
@@ -4992,283 +4974,6 @@ lpfc_bsg_mbox_cmd(struct bsg_job *job)
return rc;
}
-/**
- * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
- * @phba: Pointer to HBA context object.
- * @cmdiocbq: Pointer to command iocb.
- * @rspiocbq: Pointer to response iocb.
- *
- * This function is the completion handler for iocbs issued using
- * lpfc_menlo_cmd function. This function is called by the
- * ring event handler function without any lock held. This function
- * can be called from both worker thread context and interrupt
- * context. This function also can be called from another thread which
- * cleans up the SLI layer objects.
- * This function copies the contents of the response iocb to the
- * response iocb memory object provided by the caller of
- * lpfc_sli_issue_iocb_wait and then wakes up the thread which
- * sleeps for the iocb completion.
- **/
-static void
-lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdiocbq,
- struct lpfc_iocbq *rspiocbq)
-{
- struct bsg_job_data *dd_data;
- struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
- struct lpfc_dmabuf *bmp, *cmp, *rmp;
- struct lpfc_bsg_menlo *menlo;
- unsigned long flags;
- struct menlo_response *menlo_resp;
- unsigned int rsp_size;
- int rc = 0;
-
- dd_data = cmdiocbq->context_un.dd_data;
- cmp = cmdiocbq->cmd_dmabuf;
- bmp = cmdiocbq->bpl_dmabuf;
- menlo = &dd_data->context_un.menlo;
- rmp = menlo->rmp;
- rsp = &rspiocbq->iocb;
-
- /* Determine if job has been aborted */
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
- job = dd_data->set_job;
- if (job) {
- bsg_reply = job->reply;
- /* Prevent timeout handling from trying to abort job */
- job->dd_data = NULL;
- }
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- /* Copy the job data or set the failing status for the job */
-
- if (job) {
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a
- * continuation of the exchange.
- */
-
- menlo_resp = (struct menlo_response *)
- bsg_reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- } else {
- rc = -EACCES;
- }
- } else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- bsg_reply->reply_payload_rcv_len =
- lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
- }
-
- }
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- lpfc_free_bsg_buffers(phba, cmp);
- lpfc_free_bsg_buffers(phba, rmp);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
- kfree(dd_data);
-
- /* Complete the job if active */
-
- if (job) {
- bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
- }
-
- return;
-}
-
-/**
- * lpfc_menlo_cmd - send an ioctl for menlo hardware
- * @job: fc_bsg_job to handle
- *
- * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
- * all the command completions will return the xri for the command.
- * For menlo data requests a gen request 64 CX is used to continue the exchange
- * supplied in the menlo request header xri field.
- **/
-static int
-lpfc_menlo_cmd(struct bsg_job *job)
-{
- struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct fc_bsg_request *bsg_request = job->request;
- struct fc_bsg_reply *bsg_reply = job->reply;
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq;
- IOCB_t *cmd;
- int rc = 0;
- struct menlo_command *menlo_cmd;
- struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
- struct bsg_job_data *dd_data;
- struct ulp_bde64 *bpl = NULL;
-
- /* in case no data is returned return just the return code */
- bsg_reply->reply_payload_rcv_len = 0;
-
- if (job->request_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct menlo_command)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2784 Received MENLO_CMD request below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (job->reply_len < sizeof(*bsg_reply) +
- sizeof(struct menlo_response)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2785 Received MENLO_CMD reply below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2786 Adapter does not support menlo "
- "commands\n");
- rc = -EPERM;
- goto no_dd_data;
- }
-
- menlo_cmd = (struct menlo_command *)
- bsg_request->rqst_data.h_vendor.vendor_cmd;
-
- /* allocate our bsg tracking structure */
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2787 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto no_dd_data;
- }
-
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- goto free_dd;
- }
-
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
- rc = -ENOMEM;
- goto free_bmp;
- }
-
- INIT_LIST_HEAD(&bmp->list);
-
- bpl = (struct ulp_bde64 *)bmp->virt;
- request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
- cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
- 1, bpl, &request_nseg);
- if (!cmp) {
- rc = -ENOMEM;
- goto free_bmp;
- }
- lpfc_bsg_copy_data(cmp, &job->request_payload,
- job->request_payload.payload_len, 1);
-
- bpl += request_nseg;
- reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
- rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
- bpl, &reply_nseg);
- if (!rmp) {
- rc = -ENOMEM;
- goto free_cmp;
- }
-
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
- rc = -ENOMEM;
- goto free_rmp;
- }
-
- cmd = &cmdiocbq->iocb;
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
- cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
- cmd->ulpBdeCount = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpOwner = OWN_CHIP;
- cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
- /* We want the firmware to timeout before we do */
- cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
- cmdiocbq->context_un.dd_data = dd_data;
- cmdiocbq->cmd_dmabuf = cmp;
- cmdiocbq->bpl_dmabuf = bmp;
- if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->ulpPU = MENLO_PU; /* 3 */
- cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
- cmd->ulpContext = MENLO_CONTEXT; /* 0 */
- } else {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
- cmd->ulpPU = 1;
- cmd->un.ulpWord[4] = 0;
- cmd->ulpContext = menlo_cmd->xri;
- }
-
- dd_data->type = TYPE_MENLO;
- dd_data->set_job = job;
- dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rmp = rmp;
- job->dd_data = dd_data;
-
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
- MENLO_TIMEOUT - 5);
- if (rc == IOCB_SUCCESS)
- return 0; /* done for now */
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
-
-free_rmp:
- lpfc_free_bsg_buffers(phba, rmp);
-free_cmp:
- lpfc_free_bsg_buffers(phba, cmp);
-free_bmp:
- if (bmp->virt)
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
-free_dd:
- kfree(dd_data);
-no_dd_data:
- /* make error code available to userspace */
- bsg_reply->result = rc;
- job->dd_data = NULL;
- return rc;
-}
-
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
@@ -5823,10 +5528,6 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
- case LPFC_BSG_VENDOR_MENLO_CMD:
- case LPFC_BSG_VENDOR_MENLO_DATA:
- rc = lpfc_menlo_cmd(job);
- break;
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
@@ -5979,31 +5680,6 @@ lpfc_bsg_timeout(struct bsg_job *job)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
- case TYPE_MENLO:
- /* Check to see if IOCB was issued to the port or not. If not,
- * remove it from the txq queue and call cancel iocbs.
- * Otherwise, call abort iotag.
- */
- cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
- list) {
- if (check_iocb == cmdiocb) {
- list_move_tail(&check_iocb->list, &completions);
- break;
- }
- }
- if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- if (!list_empty(&completions)) {
- lpfc_sli_cancel_iocbs(phba, &completions,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
- }
- break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 749d6c43cfce..3c04ca2d7455 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -33,8 +33,6 @@
#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
@@ -131,16 +129,6 @@ struct dfc_mbox_req {
uint32_t extSeqNum;
};
-/* Used for menlo command or menlo data. The xri is only used for menlo data */
-struct menlo_command {
- uint32_t cmd;
- uint32_t xri;
-};
-
-struct menlo_response {
- uint32_t xri; /* return the xri of the iocb exchange */
-};
-
/*
* macros and data structures for handling sli-config mailbox command
* pass-through support, this header file is shared between user and
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f5d74958b664..bcad91204328 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -370,7 +370,7 @@ void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba,
u8 cr_cx_cmd);
void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
- bool ia);
+ bool ia, bool wqec);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 7b24c932e812..5037ea09a810 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2607,8 +2607,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2688,8 +2688,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2826,8 +2826,8 @@ lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 63)
- nbytes = 63;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -3060,8 +3060,8 @@ lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
char *pbuf;
int i;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
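Each debugfs write handler above now clamps nbytes to sizeof(mybuf) - 1 rather than a hard-coded 64 (or 63), so the bound stays correct if the buffer is ever resized and one byte is always reserved for the NUL terminator. The general shape of such a handler, as a sketch:

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t sketch_debugfs_write(struct file *file, const char __user *buf,
				    size_t nbytes, loff_t *ppos)
{
	char mybuf[64];

	/* Clamp to the real buffer size, leaving room for '\0'. */
	if (nbytes > sizeof(mybuf) - 1)
		nbytes = sizeof(mybuf) - 1;

	memset(mybuf, 0, sizeof(mybuf));
	if (copy_from_user(mybuf, buf, nbytes))
		return -EFAULT;

	/* ... parse mybuf, now guaranteed NUL-terminated ... */
	return nbytes;
}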
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3fababb7c181..9e69de9eb992 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1790,18 +1790,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
- /* The new_ndlp is replacing ndlp totally, so we need
- * to put ndlp on UNUSED list and try to free it.
+ /* The ndlp doesn't have a portname yet, but does have an
+ * NPort ID. The new_ndlp portname matches the Rport's
+ * portname. Reinstantiate the new_ndlp and reset the ndlp.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
/* Two ndlps cannot have the same did on the nodelist.
- * Note: for this case, ndlp has a NULL WWPN so setting
- * the nlp_fc4_type isn't required.
+ * The KeepDID and keep_nlp_fc4_type need to be swapped
+ * because ndlp is inflight with no WWPN.
*/
ndlp->nlp_DID = keepDID;
+ ndlp->nlp_fc4_type = keep_nlp_fc4_type;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
@@ -1816,9 +1818,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_unreg_rpi(vport, ndlp);
- /* Two ndlps cannot have the same did and the fc4
- * type must be transferred because the ndlp is in
- * flight.
+ /* The ndlp and new_ndlp both have WWPNs but are swapping
+ * NPort Ids and attributes.
*/
ndlp->nlp_DID = keepDID;
ndlp->nlp_fc4_type = keep_nlp_fc4_type;
@@ -1886,7 +1887,6 @@ lpfc_end_rscn(struct lpfc_vport *vport)
else {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_RSCN_MODE;
- vport->fc_flag |= FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
}
}
@@ -2434,14 +2434,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 local_nlp_type, elscmd;
/*
- * If discovery was kicked off from RSCN mode,
- * the FC4 types supported from a
+ * If we are in RSCN mode, the FC4 types supported from a
* previous GFT_ID command may not be accurate. So, if we
* are an NVME Initiator, always look for the possibility of
* the remote NPort being an NVME Target.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & (FC_RSCN_MODE | FC_RSCN_MEMENTO) &&
+ vport->fc_flag & FC_RSCN_MODE &&
vport->nvmei_support)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
@@ -4571,15 +4570,6 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_LOCAL_REJECT:
switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_FLOGI) {
- if (PCI_DEVICE_ID_HORNET ==
- phba->pcidev->device) {
- phba->fc_topology = LPFC_TOPOLOGY_LOOP;
- phba->pport->fc_myDID = 0;
- phba->alpa_map[0] = 0;
- phba->alpa_map[1] = 0;
- }
- }
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
@@ -7915,7 +7905,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
- vport->fc_flag &= ~FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
if (rscn_cnt) {
cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
@@ -7965,7 +7954,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE;
- vport->fc_flag &= ~FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/* Indicate we are done walking fc_rscn_id_list on this vport */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index fb36f26170e4..2645def612e6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1354,8 +1354,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MEMENTO | FC_RSCN_MODE |
- FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
@@ -3763,18 +3762,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
phba->link_events++;
- if ((attn_type == LPFC_ATT_LINK_UP) &&
- !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ if (attn_type == LPFC_ATT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3788,15 +3777,13 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ "Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
bf_get(lpfc_mbx_read_top_alpa_granted,
la),
bf_get(lpfc_mbx_read_top_link_spd, la),
phba->alpa_map[0],
- bf_get(lpfc_mbx_read_top_mm, la),
- bf_get(lpfc_mbx_read_top_fa, la),
- phba->wait_4_mlo_maint_flg);
+ bf_get(lpfc_mbx_read_top_fa, la));
}
lpfc_mbx_process_link_up(phba, la);
@@ -3816,58 +3803,25 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
- attn_type == LPFC_ATT_LINK_UP) {
- if (phba->link_state != LPFC_LINK_DOWN) {
- phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1312 Link Down Event x%x received "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- lpfc_mbx_issue_link_down(phba);
- } else
- lpfc_enable_la(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1310 Menlo Maint Mode Link up Event x%x rcvd "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- /*
- * The cmnd that triggered this will be waiting for this
- * signal.
- */
- /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
- if (phba->wait_4_mlo_maint_flg) {
- phba->wait_4_mlo_maint_flg = 0;
- wake_up_interruptible(&phba->wait_4_mlo_m_q);
- }
- }
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- bf_get(lpfc_mbx_read_top_fa, la)) {
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- lpfc_issue_clear_la(phba, vport);
+ bf_get(lpfc_mbx_read_top_fa, la))
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_cmpl_read_topology_free_mbuf:
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7b8cf678abb5..071983e2cdfe 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1728,7 +1728,6 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
-#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
@@ -1773,7 +1772,6 @@ struct lpfc_fdmi_reg_portattr {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
-#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -3074,7 +3072,6 @@ struct lpfc_mbx_read_top {
#define lpfc_mbx_read_top_topology_WORD word3
#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
-#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
/* store the LILP AL_PA position map into */
struct ulp_bde64 lilpBde64;
#define LPFC_ALPA_MAP_SIZE 128
@@ -4423,11 +4420,4 @@ lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
ulp_word4 == IOERR_SLI_DOWN));
}
-#define MENLO_TRANSPORT_TYPE 0xfe
-#define MENLO_CONTEXT 0
-#define MENLO_PU 3
-#define MENLO_TIMEOUT 30
-#define SETVAR_MLOMNT 0x103107
-#define SETVAR_MLORST 0x103007
-
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f024415731ac..4527fef23ae7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4736,7 +4736,6 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
-#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index a1b9be245560..0b1616e93cf4 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,8 +60,6 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
- PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 750dd1e9f2cc..c69c5a0979ec 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -375,6 +375,9 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
+ if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
+ phba->sli4_hba.fawwpn_flag &=
+ ~LPFC_FAWWPN_FABRIC;
lpfc_printf_log(phba, KERN_INFO,
LOG_SLI | LOG_DISCOVERY | LOG_ELS,
"2701 FA-PWWN change WWPN from %llx to "
@@ -2682,11 +2685,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
break;
- case PCI_DEVICE_ID_HORNET:
- m = (typeof(m)){"LP21000", "PCIe",
- "Obsolete, Unsupported FCoE Adapter"};
- GE = 1;
- break;
case PCI_DEVICE_ID_PROTEUS_VF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
"Obsolete, Unsupported Fibre Channel Adapter"};
@@ -7692,7 +7690,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
/* Initialize the wait queue head for the kernel thread */
init_waitqueue_head(&phba->work_waitq);
@@ -7776,13 +7773,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- }
-
if (!phba->sli.sli3_ring)
phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
sizeof(struct lpfc_sli_ring),
@@ -7958,6 +7948,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ if (!phba->wq)
+ return -ENOMEM;
/*
* Initialize timers used by driver
@@ -9975,7 +9967,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"configured on\n");
phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
} else {
- phba->sli4_hba.fawwpn_flag = 0;
+ /* Clear FW configured flag, preserve driver flag */
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
}
phba->sli4_hba.conf_trunk =
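lpfc_sli4_driver_resource_setup() now checks the alloc_workqueue() return value; an unchecked NULL workqueue would only surface later as a crash on the first queue_work(). Minimal sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *sketch_wq;

static int sketch_setup(void)
{
	sketch_wq = alloc_workqueue("sketch_wq", WQ_MEM_RECLAIM, 0);
	if (!sketch_wq)
		return -ENOMEM;	/* fail setup here, not on first queue_work() */
	return 0;
}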
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index cd10ee6482fc..152245f7cacc 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2824,6 +2824,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqep->word0 = 0;
bf_set(lpfc_wcqe_c_status, wcqep, stat);
wcqep->parameter = param;
+ wcqep->total_data_placed = 0;
wcqep->word3 = 0; /* xb is 0 */
/* Call release with XB=1 to queue the IO into the abort list. */
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c0ee0b39075d..f7cfac0da9b6 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -722,7 +722,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_async_xchg_ctx *ctxp;
- uint32_t status, result, op, start_clean, logerr;
+ uint32_t status, result, op, logerr;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
@@ -820,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, cmd_flag);
- memset(((char *)cmdwqe) + start_clean, 0,
- (sizeof(struct lpfc_iocbq) - start_clean));
+ memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
@@ -3337,46 +3335,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
return 1;
}
-/**
- * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-static void
-lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the I/O associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_async_xchg_ctx *ctxp,
@@ -3386,7 +3344,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_iocbq *abts_wqeq;
struct lpfc_nodelist *ndlp;
unsigned long flags;
- u8 opt;
+ bool ia;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3426,7 +3384,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
abts_wqeq = ctxp->abort_wqeq;
ctxp->state = LPFC_NVME_STE_ABORT;
- opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
+ ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3472,7 +3430,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
+ abts_wqeq->iotag, CLASS3,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, true);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
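The nvmet completion path swaps the open-coded offsetof() + memset() pair for memset_startat(), which zeroes from a named member to the end of the structure and removes the length arithmetic entirely. A toy illustration; the struct and its fields are invented:

#include <linux/string.h>
#include <linux/types.h>

struct sketch_iocb {
	u32	iotag;		/* preserved across reuse */
	u32	cmd_flag;	/* first member to be cleared */
	u32	state;
	void	*context;
};

static void sketch_recycle(struct sketch_iocb *q)
{
	/* Zero cmd_flag through the end of the struct in one call. */
	memset_startat(q, 0, cmd_flag);
}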
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ba5e4016262e..084c0f9fdc3a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5456,7 +5456,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
}
- atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 80ac3a051c19..608016725db9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2003,10 +2003,12 @@ initpath:
sync_buf->cmd_flag |= LPFC_IO_CMF;
ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
- if (ret_val)
+ if (ret_val) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
ret_val);
+ __lpfc_sli_release_iocbq(phba, sync_buf);
+ }
out_unlock:
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret_val;
@@ -5263,7 +5265,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
- phba->sli4_hba.fawwpn_flag = 0;
+ /* Preserve FA-PWWN expectation */
+ phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -6052,6 +6055,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
/* obtain link type and link number via READ_CONFIG */
phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
lpfc_sli4_read_config(phba);
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
goto retrieve_ppname;
@@ -10216,16 +10223,6 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
- case CMD_GEN_REQUEST64_CR:
- case CMD_GEN_REQUEST64_CX:
- if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
- FC_RCTL_DD_UNSOL_CMD) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Type !=
- MENLO_TRANSPORT_TYPE))
-
- goto iocb_busy;
- break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
@@ -10549,6 +10546,7 @@ __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ cmd->ulpPU = PARM_NPIV_DID;
}
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
@@ -10855,7 +10853,8 @@ lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
static void
__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
- u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
{
IOCB_t *icmd = NULL;
@@ -10884,7 +10883,8 @@ __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
static void
__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
- u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
{
union lpfc_wqe128 *wqe;
@@ -10911,6 +10911,8 @@ __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
/* Word 11 */
+ if (wqec)
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
}
@@ -10918,10 +10920,10 @@ __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
void
lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
- bool ia)
+ bool ia, bool wqec)
{
phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
- cqid, ia);
+ cqid, ia, wqec);
}
/**
@@ -12199,7 +12201,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
cmdiocb->iocb.ulpClass,
- LPFC_WQE_CQ_ID_DEFAULT, ia);
+ LPFC_WQE_CQ_ID_DEFAULT, ia, false);
abtsiocbp->vport = vport;
@@ -12659,7 +12661,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
iocbq->iocb.ulpClass, cqid,
- ia);
+ ia, false);
abtsiocbq->vport = vport;
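The CMF_SYNC_WQE hunk near the top of the lpfc_sli.c diff adds a missing error-path release: when lpfc_sli4_issue_wqe() fails, the iocbq acquired earlier is handed back instead of leaked. The shape of the fix, sketched with hypothetical helpers rather than the lpfc API:

#include <linux/slab.h>

/* Hypothetical submit; in this sketch it always reports failure. */
static int sketch_issue_hw(void *buf)
{
	return -EIO;
}

static int sketch_issue(void)
{
	void *buf;
	int ret;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sketch_issue_hw(buf);
	if (ret)
		kfree(buf);	/* undo the acquire; otherwise buf leaks */
	/* on success, ownership passes to the completion path */
	return ret;
}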
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 0af6860b8936..cd33dfec758c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -355,7 +355,6 @@ struct lpfc_sli {
#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2ab6f7db64d8..63eba9928e4b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.4"
+#define LPFC_DRIVER_VERSION "14.2.0.5"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 2a339d4a7e9d..157c3bdb50be 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(cmd_per_lun,
* This would result in non-disk devices being skipped during driver load
* time. These can be later added though, using /proc/scsi/scsi
*/
-static unsigned int megaraid_fast_load = 0;
+static unsigned int megaraid_fast_load;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load,
"Faster loading of the driver, skips physical devices! (default=0)");
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0917b05059b4..f6c37a97544e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3950,9 +3950,9 @@ process_fw_state_change_wq(struct work_struct *work)
u32 wait;
unsigned long flags;
- if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
- atomic_read(&instance->adprecovery));
+ atomic_read(&instance->adprecovery));
return;
}
@@ -7153,22 +7153,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- goto fail;
+ return -ENOMEM;
break;
case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- goto fail;
+ return -ENOMEM;
break;
}
return 0;
- fail:
- kfree(instance->reply_map);
- instance->reply_map = NULL;
- return -ENOMEM;
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 5b5885d9732b..09c5fe37754c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3199,7 +3199,6 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
int sge_count;
- u8 cmd_type;
u16 pd_index = 0;
u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
@@ -3225,7 +3224,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- switch (cmd_type = megasas_cmd_type(scp)) {
+ switch (megasas_cmd_type(scp)) {
case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
break;
@@ -5311,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 322d3ad38159..84b541a57b7b 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -38,7 +38,7 @@
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
@@ -1882,11 +1882,6 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
goto out_release;
}
- /* Old junk for root discovery, that will die ultimately */
-#if !defined(MODULE)
- note_scsi_host(mesh, mesh_host);
-#endif
-
mesh_host->base = macio_resource_start(mdev, 0);
mesh_host->irq = macio_irq(mdev, 0);
ms = (struct mesh_state *) mesh_host->hostdata;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0e1cb4aa4ca2..0935b2e80662 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -66,12 +66,14 @@ extern atomic64_t event_counter;
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
+#define MPI3MR_MAX_SECTORS 2048
+
/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_PAGE_SIZE_4K 4096
#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
/* Definitions for MAX values for shost */
-#define MPI3MR_MAX_CMDS_LUN 7
+#define MPI3MR_MAX_CMDS_LUN 128
#define MPI3MR_MAX_CDB_LENGTH 32
/* Admin queue management definitions */
@@ -333,6 +335,12 @@ struct mpi3mr_ioc_facts {
u8 sge_mod_mask;
u8 sge_mod_value;
u8 sge_mod_shift;
+ u8 max_dev_per_tg;
+ u16 max_io_throttle_group;
+ u16 io_throttle_data_length;
+ u16 io_throttle_low;
+ u16 io_throttle_high;
+
};
/**
@@ -425,6 +433,31 @@ struct mpi3mr_intr_info {
};
/**
+ * struct mpi3mr_throttle_group_info - Throttle group info
+ *
+ * @io_divert: Flag indicates io divert is on or off for the TG
+ * @need_qd_reduction: Flag to indicate QD reduction is needed
+ * @qd_reduction: Queue Depth reduction in units of 10%
+ * @fw_qd: QueueDepth value reported by the firmware
+ * @modified_qd: Modified QueueDepth value due to throttling
+ * @id: Throttle Group ID.
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @pend_large_data_sz: Counter to track pending large data
+ */
+struct mpi3mr_throttle_group_info {
+ u8 io_divert;
+ u8 need_qd_reduction;
+ u8 qd_reduction;
+ u16 fw_qd;
+ u16 modified_qd;
+ u16 id;
+ u32 high;
+ u32 low;
+ atomic_t pend_large_data_sz;
+};
+
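
pend_large_data_sz is the heart of the throttling bookkeeping: the submit path adds a large I/O's size (in 512-byte blocks) and the completion path subtracts it, each time reading back the new total to compare against the watermarks. A minimal userspace sketch of that pattern, using C11 atomics as stand-ins for the kernel's atomic_add_return()/atomic_sub_return():

#include <stdatomic.h>
#include <stdio.h>

static atomic_long pend_large_data_sz;

int main(void)
{
	long blks = 4096;	/* a 2 MiB I/O expressed in 512-byte blocks */

	/* submit path: account the I/O and read back the new total
	 * (fetch_add returns the old value, so add blks to match
	 * atomic_add_return semantics) */
	long after_add = atomic_fetch_add(&pend_large_data_sz, blks) + blks;

	/* completion path: release the same amount */
	long after_sub = atomic_fetch_sub(&pend_large_data_sz, blks) - blks;

	printf("%ld %ld\n", after_add, after_sub);	/* 4096 0 */
	return 0;
}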
+/**
* struct tgt_dev_sas_sata - SAS/SATA device specific
* information cached from firmware given data
*
@@ -457,22 +490,33 @@ struct tgt_dev_pcie {
};
/**
- * struct tgt_dev_volume - virtual device specific information
+ * struct tgt_dev_vd - virtual device specific information
* cached from firmware given data
*
* @state: State of the VD
+ * @tg_qd_reduction: Queue Depth reduction in units of 10%
+ * @tg_id: VDs throttle group ID
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @tg: Pointer to throttle group info
*/
-struct tgt_dev_volume {
+struct tgt_dev_vd {
u8 state;
+ u8 tg_qd_reduction;
+ u16 tg_id;
+ u32 tg_high;
+ u32 tg_low;
+ struct mpi3mr_throttle_group_info *tg;
};
+
/**
* union _form_spec_inf - union of device specific information
*/
union _form_spec_inf {
struct tgt_dev_sas_sata sas_sata_inf;
struct tgt_dev_pcie pcie_inf;
- struct tgt_dev_volume vol_inf;
+ struct tgt_dev_vd vd_inf;
};
@@ -490,6 +534,7 @@ union _form_spec_inf {
* @dev_type: SAS/SATA/PCIE device type
* @is_hidden: Should be exposed to upper layers or not
* @host_exposed: Already exposed to host or not
+ * @io_throttle_enabled: I/O throttling needed or not
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
* @dev_spec: Device type specific information
@@ -506,6 +551,7 @@ struct mpi3mr_tgt_dev {
u8 dev_type;
u8 is_hidden;
u8 host_exposed;
+ u8 io_throttle_enabled;
u16 q_depth;
u64 wwid;
union _form_spec_inf dev_spec;
@@ -557,6 +603,9 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removed: Device removed in the Firmware
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @io_divert: Flag indicates io divert is on or off for the dev
+ * @throttle_group: Pointer to throttle group info
* @tgt_dev: Internal target device pointer
* @pend_count: Counter to track pending I/Os during error
* handling
@@ -570,6 +619,9 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removed;
u8 dev_removedelay;
u8 dev_type;
+ u8 io_throttle_enabled;
+ u8 io_divert;
+ struct mpi3mr_throttle_group_info *throttle_group;
struct mpi3mr_tgt_dev *tgt_dev;
u32 pend_count;
};
@@ -796,6 +848,12 @@ struct scmd_priv {
* @logdata_buf: Circular buffer to store log data entries
* @logdata_buf_idx: Index of entry in buffer to store
* @logdata_entry_sz: log data entry size
+ * @pend_large_data_sz: Counter to track pending large data
+ * @io_throttle_data_length: I/O size to track in 512b blocks
+ * @io_throttle_high: I/O size to start throttle in 512b blocks
+ * @io_throttle_low: I/O size to stop throttle in 512b blocks
+ * @num_io_throttle_group: Maximum number of throttle groups
+ * @throttle_groups: Pointer to throttle group info structures
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -960,6 +1018,13 @@ struct mpi3mr_ioc {
u8 *logdata_buf;
u16 logdata_buf_idx;
u16 logdata_entry_sz;
+
+ atomic_t pend_large_data_sz;
+ u32 io_throttle_data_length;
+ u32 io_throttle_high;
+ u32 io_throttle_low;
+ u16 num_io_throttle_group;
+ struct mpi3mr_throttle_group_info *throttle_groups;
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index f1d4ea8ba989..0866dfd43318 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2785,6 +2785,27 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.shutdown_timeout =
le16_to_cpu(facts_data->shutdown_timeout);
+ mrioc->facts.max_dev_per_tg =
+ facts_data->max_devices_per_throttle_group;
+ mrioc->facts.io_throttle_data_length =
+ le16_to_cpu(facts_data->io_throttle_data_length);
+ mrioc->facts.max_io_throttle_group =
+ le16_to_cpu(facts_data->max_io_throttle_group);
+ mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
+ mrioc->facts.io_throttle_high =
+ le16_to_cpu(facts_data->io_throttle_high);
+
+ /* Store in 512b block count */
+ if (mrioc->facts.io_throttle_data_length)
+ mrioc->io_throttle_data_length =
+ (mrioc->facts.io_throttle_data_length * 2 * 4);
+ else
+ /* set the length to 1MB + 1K to disable throttle */
+ mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+
+ mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
+ mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
+
ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
@@ -2798,6 +2819,13 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
mrioc->facts.dma_mask, (facts_flags &
MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ ioc_info(mrioc,
+ "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
+ mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
+ ioc_info(mrioc,
+ "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
+ mrioc->facts.io_throttle_data_length * 4,
+ mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}
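
The scaling above can be checked in isolation. Assuming the firmware units implied by the log message (4 KiB units for the data length, MiB for the watermarks), a 4 KiB unit is eight 512-byte blocks (hence * 2 * 4) and a MiB is 2048 of them (hence * 2 * 1024). A small standalone sketch of the same arithmetic with illustrative firmware values:

#include <stdio.h>

int main(void)
{
	unsigned int fw_len_4k = 256;	/* 256 x 4 KiB = 1 MiB */
	unsigned int fw_high_mb = 8, fw_low_mb = 4;

	unsigned int len_blks  = fw_len_4k * 2 * 4;	/* 4 KiB -> 512 B blocks */
	unsigned int high_blks = fw_high_mb * 2 * 1024;	/* MiB -> 512 B blocks */
	unsigned int low_blks  = fw_low_mb * 2 * 1024;

	printf("track I/Os >= %u blocks; divert above %u, resume below %u\n",
	       len_blks, high_blks, low_blks);	/* 2048, 16384, 8192 */
	return 0;
}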
/**
@@ -3666,6 +3694,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
int retval = 0;
u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
+ u32 sz;
retry_init:
retval = mpi3mr_bring_ioc_ready(mrioc);
@@ -3691,6 +3720,9 @@ retry_init:
mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
+ mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+
if (reset_devices)
mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
MPI3MR_HOST_IOS_KDUMP);
@@ -3760,6 +3792,15 @@ retry_init:
}
}
+ if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
+ dprint_init(mrioc, "allocating memory for throttle groups\n");
+ sz = sizeof(struct mpi3mr_throttle_group_info);
+ mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *)
+ kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ if (!mrioc->throttle_groups)
+ goto out_failed_noretry;
+ }
+
retval = mpi3mr_enable_events(mrioc);
if (retval) {
ioc_err(mrioc, "failed to enable events %d\n",
@@ -3981,6 +4022,7 @@ static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
u16 i;
+ struct mpi3mr_throttle_group_info *tg;
mrioc->change_count = 0;
mrioc->active_poll_qcount = 0;
@@ -4029,6 +4071,22 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
spin_lock_init(&mrioc->req_qinfo[i].q_lock);
mpi3mr_memset_op_req_q_buffers(mrioc, i);
}
+
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+ if (mrioc->throttle_groups) {
+ tg = mrioc->throttle_groups;
+ for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
+ tg->id = 0;
+ tg->fw_qd = 0;
+ tg->modified_qd = 0;
+ tg->io_divert = 0;
+ tg->need_qd_reduction = 0;
+ tg->high = 0;
+ tg->low = 0;
+ tg->qd_reduction = 0;
+ atomic_set(&tg->pend_large_data_sz, 0);
+ }
+ }
}
/**
@@ -4663,6 +4721,15 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+ if (mrioc->num_io_throttle_group !=
+ mrioc->facts.max_io_throttle_group) {
+ ioc_err(mrioc,
+ "max io throttle group doesn't match old(%d), new(%d)\n",
+ mrioc->num_io_throttle_group,
+ mrioc->facts.max_io_throttle_group);
+ retval = -EPERM;
+ goto out;
+ }
mpi3mr_flush_delayed_cmd_lists(mrioc);
mpi3mr_flush_drv_cmds(mrioc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 59a18769a4fe..bfa1165e23b6 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -38,6 +38,8 @@ MODULE_PARM_DESC(logging_level,
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -355,6 +357,50 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to queue on synthetically generated driver event to
+ * the event worker thread, the driver event will be used to
+ * reduce the QD of all VDs in the TG from the worker thread.
+ *
+ * Return: None.
+ */
+static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ struct mpi3mr_fwevt *fwevt;
+ u16 sz = sizeof(struct mpi3mr_throttle_group_info *);
+
+ /*
+ * If the QD reduction event is already queued due to throttling and
+ * the QD has not been restored through a device info change event,
+ * then don't queue further reduction events
+ */
+ if (tg->fw_qd != tg->modified_qd)
+ return;
+
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
+ return;
+ }
+ *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = sz;
+ tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);
+
+ dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
+ tg->id);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
+
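
The new queue depth computed above is fw_qd scaled by qd_reduction tenths, floored at 8 by the max_t(). A standalone sketch of the formula with illustrative numbers:

#include <stdio.h>

/* max_t(u16, ...) equivalent for this sketch */
static unsigned short reduced_qd(unsigned short fw_qd, unsigned short tenths)
{
	unsigned short qd = (unsigned short)((fw_qd * tenths) / 10);

	return qd > 8 ? qd : 8;	/* never drop below a QD of 8 */
}

int main(void)
{
	/* e.g. firmware QD 128 with a qd_reduction of 3 (30%) -> 38 */
	printf("%u\n", reduced_qd(128, 3));
	return 0;
}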
+/**
* mpi3mr_invalidate_devhandles -Invalidate device handles
* @mrioc: Adapter instance reference
*
@@ -373,6 +419,9 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
if (tgtdev->starget && tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ tgt_priv->io_throttle_enabled = 0;
+ tgt_priv->io_divert = 0;
+ tgt_priv->throttle_group = NULL;
}
}
}
@@ -710,6 +759,35 @@ static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
}
/**
+ * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ * @divert_value: 1 or 0
+ *
+ * Accessor to set io_divert flag for each device associated
+ * with the given throttle group with the given value.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg, u8 divert_value)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg)
+ tgt_priv->io_divert = divert_value;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
* mpi3mr_print_device_event_notice - print notice related to post processing of
* device event after controller reset.
*
@@ -840,6 +918,7 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
else if (!q_depth)
q_depth = MPI3MR_DEFAULT_SDEV_QD;
retval = scsi_change_queue_depth(sdev, q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
return retval;
}
@@ -926,6 +1005,7 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* @mrioc: Adapter instance reference
* @tgtdev: Target device internal structure
* @dev_pg0: New device page0
+ * @is_added: Flag to indicate the device is just added
*
* Update the information from the device page0 into the driver
* cached target device structure.
@@ -933,10 +1013,11 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* Return: Nothing.
*/
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
+ bool is_added)
{
u16 flags = 0;
- struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
u8 prot_mask = 0;
tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
@@ -951,12 +1032,19 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
flags = le16_to_cpu(dev_pg0->flags);
tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+ if (is_added == true)
+ tgtdev->io_throttle_enabled =
+ (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+
if (tgtdev->starget && tgtdev->starget->hostdata) {
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
tgtdev->starget->hostdata;
scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgtdev->io_throttle_enabled;
}
switch (dev_pg0->access_status) {
@@ -1034,10 +1122,32 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
{
struct mpi3_device0_vd_format *vdinf =
&dev_pg0->device_specific.vd_format;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u16 vdinf_io_throttle_group =
+ le16_to_cpu(vdinf->io_throttle_group);
- tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
+ tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
+ tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
+ tgtdev->dev_spec.vd_inf.tg_high =
+ le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
+ tgtdev->dev_spec.vd_inf.tg_low =
+ le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
+ if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
+ tg = mrioc->throttle_groups + vdinf_io_throttle_group;
+ tg->id = vdinf_io_throttle_group;
+ tg->high = tgtdev->dev_spec.vd_inf.tg_high;
+ tg->low = tgtdev->dev_spec.vd_inf.tg_low;
+ tg->qd_reduction =
+ tgtdev->dev_spec.vd_inf.tg_qd_reduction;
+ if (is_added == true)
+ tg->fw_qd = tgtdev->q_depth;
+ tg->modified_qd = tgtdev->q_depth;
+ }
+ tgtdev->dev_spec.vd_inf.tg = tg;
+ if (scsi_tgt_priv_data)
+ scsi_tgt_priv_data->throttle_group = tg;
break;
}
default:
@@ -1134,7 +1244,7 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
if (!tgtdev)
goto out;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
if (!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
if (tgtdev->is_hidden && tgtdev->host_exposed)
@@ -1428,6 +1538,60 @@ static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_update_sdev_qd - Update SCSI device queue depth
+ * @sdev: SCSI device reference
+ * @data: Queue depth reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the QD of each SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
+{
+ u16 *q_depth = (u16 *)data;
+
+ scsi_change_queue_depth(sdev, (int)*q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+}
+
+/**
+ * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to reduce QD for each device associated with the
+ * given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg) {
+ dprint_event_bh(mrioc,
+ "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
+ tgt_priv->perst_id, tgtdev->q_depth,
+ tg->modified_qd);
+ starget_for_each_device(tgtdev->starget,
+ (void *)&tg->modified_qd,
+ mpi3mr_update_sdev_qd);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
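
starget_for_each_device() threads an opaque data pointer through to the per-device callback, which is how the reduced depth reaches mpi3mr_update_sdev_qd() above. A toy model of that callback contract follows; the struct and the iterator loop are stand-ins, not the real SCSI midlayer API:

#include <stdio.h>

struct scsi_device { int queue_depth; int max_queue_depth; };

/* Model of the iterator callback: an opaque data pointer carries
 * the new depth to every device of the target. */
static void update_sdev_qd(struct scsi_device *sdev, void *data)
{
	unsigned short *q_depth = data;

	sdev->queue_depth = *q_depth;	/* scsi_change_queue_depth() */
	sdev->max_queue_depth = sdev->queue_depth;
}

int main(void)
{
	struct scsi_device devs[2] = { { 128, 128 }, { 128, 128 } };
	unsigned short new_qd = 38;

	for (int i = 0; i < 2; i++)	/* starget_for_each_device() */
		update_sdev_qd(&devs[i], &new_qd);

	printf("%d %d\n", devs[0].queue_depth, devs[1].queue_depth);
	return 0;
}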
+/**
* mpi3mr_fwevt_bh - Firmware event bottomhalf handler
* @mrioc: Adapter instance reference
* @fwevt: Firmware event reference
@@ -1484,6 +1648,20 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_logdata_evt_bh(mrioc, fwevt);
break;
}
+ case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
+ {
+ struct mpi3mr_throttle_group_info *tg;
+
+ tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
+ dprint_event_bh(mrioc,
+ "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
+ tg->id, tg->need_qd_reduction);
+ if (tg->need_qd_reduction) {
+ mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
+ tg->need_qd_reduction = 0;
+ }
+ break;
+ }
default:
break;
}
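
The throttle-group pointer itself is the event payload here: the producer stores it into fwevt->event_data and the bottom half casts it back out. A minimal sketch of carrying a pointer through a byte buffer; memcpy is used in the sketch for strict-aliasing safety, whereas the driver casts the buffer directly:

#include <stdio.h>
#include <string.h>

struct tg { int id; };

int main(void)
{
	struct tg group = { 7 };
	unsigned char event_data[sizeof(struct tg *)];
	struct tg *p = &group, *q;

	/* producer: store the pointer itself as the event payload */
	memcpy(event_data, &p, sizeof(p));

	/* consumer: recover the pointer from the payload */
	memcpy(&q, event_data, sizeof(q));
	printf("tg id %d\n", q->id);	/* tg id 7 */
	return 0;
}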
@@ -1540,13 +1718,13 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
perst_id = le16_to_cpu(dev_pg0->persistent_id);
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (tgtdev) {
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_put(tgtdev);
} else {
tgtdev = mpi3mr_alloc_tgtdev();
if (!tgtdev)
return -ENOMEM;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
}
@@ -2558,6 +2736,11 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
u32 xfer_count = 0, sense_count = 0, resp_data = 0;
u16 dev_handle = 0xFFFF;
struct scsi_sense_hdr sshdr;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u8 throttle_enabled_dev = 0;
*reply_dma = 0;
reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
@@ -2614,6 +2797,51 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
goto out;
}
priv = scsi_cmd_priv(scmd);
+
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (stgt_priv_data) {
+ tg = stgt_priv_data->throttle_group;
+ throttle_enabled_dev =
+ stgt_priv_data->io_throttle_enabled;
+ }
+ }
+ if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
+ throttle_enabled_dev)) {
+ ioc_pend_data_len = atomic_sub_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (tg) {
+ tg_pend_data_len = atomic_sub_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (tg->io_divert && ((ioc_pend_data_len <=
+ mrioc->io_throttle_low) &&
+ (tg_pend_data_len <= tg->low))) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ } else {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+ }
+ } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
+ ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
+ if (!tg) {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+
+ } else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
+ tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
+ if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ }
+ }
+
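
Taken together with the submit path shown further below, io_divert implements a two-watermark hysteresis on outstanding large-I/O blocks: diversion starts once pending crosses the high mark and stops only after it falls back under the low mark. A toy single-threaded model, with made-up watermark values:

#include <stdbool.h>
#include <stdio.h>

static bool io_divert;

static void account(long *pending, long delta, long high, long low)
{
	*pending += delta;
	if (!io_divert && *pending >= high)
		io_divert = true;	/* submit path: start diverting */
	else if (io_divert && *pending <= low)
		io_divert = false;	/* completion path: stop diverting */
}

int main(void)
{
	long pending = 0;

	account(&pending, 16384, 16384, 8192);	/* crosses high watermark */
	printf("divert=%d\n", io_divert);	/* divert=1 */
	account(&pending, -12288, 16384, 8192);	/* falls under low watermark */
	printf("divert=%d\n", io_divert);	/* divert=0 */
	return 0;
}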
if (success_desc) {
scmd->result = DID_OK << 16;
goto out_success;
@@ -3834,6 +4062,11 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
tgt_dev->starget = starget;
atomic_set(&scsi_tgt_priv_data->block_io, 0);
retval = 0;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgt_dev->io_throttle_enabled;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group =
+ tgt_dev->dev_spec.vd_inf.tg;
} else
retval = -ENXIO;
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
@@ -3989,10 +4222,13 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
int retval = 0;
u16 dev_handle;
u16 host_tag;
- u32 scsiio_flags = 0;
+ u32 scsiio_flags = 0, data_len_blks = 0;
struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
u8 is_pcie_dev = 0;
+ u32 tracked_io_sz = 0;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
if (mrioc->unrecoverable) {
scmd->result = DID_ERROR << 16;
@@ -4096,11 +4332,50 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ if ((data_len_blks >= mrioc->io_throttle_data_length) &&
+ stgt_priv_data->io_throttle_enabled) {
+ tracked_io_sz = data_len_blks;
+ tg = stgt_priv_data->throttle_group;
+ if (tg) {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ tg_pend_data_len = atomic_add_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (!tg->io_divert && ((ioc_pend_data_len >=
+ mrioc->io_throttle_high) ||
+ (tg_pend_data_len >= tg->high))) {
+ tg->io_divert = 1;
+ tg->need_qd_reduction = 1;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
+ tg, 1);
+ mpi3mr_queue_qd_reduction_event(mrioc, tg);
+ }
+ } else {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (ioc_pend_data_len >= mrioc->io_throttle_high)
+ stgt_priv_data->io_divert = 1;
+ }
+ }
+
+ if (stgt_priv_data->io_divert) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
+ }
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
if (mpi3mr_op_request_post(mrioc, op_req_q,
scmd_priv_data->mpi3mr_scsiio_req)) {
mpi3mr_clear_scmd_priv(mrioc, scmd);
retval = SCSI_MLQUEUE_HOST_BUSY;
+ if (tracked_io_sz) {
+ atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
+ if (tg)
+ atomic_sub(tracked_io_sz,
+ &tg->pend_large_data_sz);
+ }
goto out;
}
@@ -4313,6 +4588,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_channel = 0;
shost->max_id = 0xFFFFFFFF;
+ shost->host_tagset = 1;
+
if (prot_mask >= 0)
scsi_host_set_prot(shost, prot_mask);
else {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 9a1ae52bb621..565339a0811d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -873,7 +873,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* @fault_code: fault code
*/
void
-mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
@@ -1057,7 +1057,7 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
desc = "config no defaults";
break;
case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
- desc = "config cant commit";
+ desc = "config can't commit";
break;
/****************************************************************************
@@ -1321,7 +1321,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* @log_info: log info
*/
static void
-_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
union loginfo_type {
u32 loginfo;
@@ -1393,7 +1393,7 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
(ioc->logging_level & MPT_DEBUG_REPLY)) {
- _base_sas_ioc_info(ioc , mpi_reply,
+ _base_sas_ioc_info(ioc, mpi_reply,
mpt3sas_base_get_msg_frame(ioc, smid));
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5e8887fa02c8..def37a7e5980 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5294,7 +5294,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
}
/**
- * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
+ * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
@@ -12410,7 +12410,6 @@ scsih_suspend(struct device *dev)
return rc;
mpt3sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
_scsih_nvme_shutdown(ioc);
ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 991eb01bb1e0..91d78d0a38fe 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -699,6 +699,10 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
return 0;
}
+static void pm8001_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
+}
+
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
u32 max_wait_count;
@@ -3134,7 +3138,7 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when HBA driver received the identify done event or initiate FIS received
* event(for SATA), it will invoke this function to notify the sas layer that
- * the sas toplogy has formed, please discover the the whole sas domain,
+ * the sas topology has formed, please discover the whole sas domain,
* while receive a broadcast(change) primitive just tell the sas
* layer to discover the changed domain rather than the whole domain.
*/
@@ -4934,6 +4938,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
const struct pm8001_dispatch pm8001_8001_dispatch = {
.name = "pmc8001",
.chip_init = pm8001_chip_init,
+ .chip_post_init = pm8001_chip_post_init,
.chip_soft_rst = pm8001_chip_soft_rst,
.chip_rst = pm8001_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 01f2f41928eb..a0028e130a7e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
" 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
-static int pm8001_init_ccb_tag(struct pm8001_hba_info *, struct Scsi_Host *, struct pci_dev *);
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
/*
* chip info structure to identify chip key functionality as
@@ -81,6 +81,18 @@ LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
+static int pm8001_map_queues(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+ if (pm8001_ha->number_of_intr > 1)
+ blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+
+ return blk_mq_map_queues(qmap);
+}
+
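
Because vector 0 is reserved for housekeeping (the pre_vectors = 1 in the MSI-X setup below), blk_mq_pci_map_queues() is passed an offset of 1: hardware queue i rides on vector i + 1. A toy model of that offset, with a fabricated vector-to-CPU affinity table:

#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	/* fabricated affinity: IRQ vector v is pinned to cpu v - 1;
	 * vector 0 is the housekeeping interrupt and gets no hw queue */
	int vector_for_cpu[NR_CPUS] = { 1, 2, 3, 4 };

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int hwq = vector_for_cpu[cpu] - 1;	/* the offset of 1 */

		printf("cpu%d -> vector %d -> hwq %d\n",
		       cpu, vector_for_cpu[cpu], hwq);
	}
	return 0;
}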
/*
* The main structure which LLDD must register for scsi core.
*/
@@ -109,6 +121,8 @@ static struct scsi_host_template pm8001_sht = {
#endif
.shost_groups = pm8001_host_groups,
.track_queue_depth = 1,
+ .cmd_per_lun = 32,
+ .map_queues = pm8001_map_queues,
};
/*
@@ -607,12 +621,8 @@ static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
shost->transportt = pm8001_stt;
shost->max_id = PM8001_MAX_DEVICES;
- shost->max_lun = 8;
- shost->max_channel = 0;
shost->unique_id = pm8001_id;
shost->max_cmd_len = 16;
- shost->can_queue = PM8001_CAN_QUEUE;
- shost->cmd_per_lun = 32;
return 0;
exit_free1:
kfree(arr_port);
@@ -933,31 +943,35 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
- u32 number_of_intr;
- int rc, cpu_online_count;
unsigned int allocated_irq_vectors;
+ int rc;
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
- number_of_intr = 1;
+ rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1,
+ PCI_IRQ_MSIX);
} else {
- number_of_intr = PM8001_MAX_MSIX_VEC;
+ /*
+ * Queue index #0 is used always for housekeeping, so don't
+ * include in the affinity spreading.
+ */
+ struct irq_affinity desc = {
+ .pre_vectors = 1,
+ };
+ rc = pci_alloc_irq_vectors_affinity(
+ pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}
- cpu_online_count = num_online_cpus();
- number_of_intr = min_t(int, cpu_online_count, number_of_intr);
- rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
- number_of_intr, PCI_IRQ_MSIX);
allocated_irq_vectors = rc;
if (rc < 0)
return rc;
/* Assigns the number of interrupts */
- number_of_intr = min_t(int, allocated_irq_vectors, number_of_intr);
- pm8001_ha->number_of_intr = number_of_intr;
+ pm8001_ha->number_of_intr = allocated_irq_vectors;
/* Maximum queue number updating in HBA structure */
- pm8001_ha->max_q_num = number_of_intr;
+ pm8001_ha->max_q_num = allocated_irq_vectors;
pm8001_dbg(pm8001_ha, INIT,
"pci_alloc_irq_vectors request ret:%d no of intr %d\n",
@@ -1124,10 +1138,23 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_ha_free;
}
- rc = pm8001_init_ccb_tag(pm8001_ha, shost, pdev);
+ rc = pm8001_init_ccb_tag(pm8001_ha);
if (rc)
goto err_out_enable;
+
+ PM8001_CHIP_DISP->chip_post_init(pm8001_ha);
+
+ if (pm8001_ha->number_of_intr > 1) {
+ shost->nr_hw_queues = pm8001_ha->number_of_intr - 1;
+ /*
+ * For now, ensure we're not sent too many commands by setting
+ * host_tagset. This is also required if we start using request
+ * tag.
+ */
+ shost->host_tagset = 1;
+ }
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
@@ -1177,16 +1204,14 @@ err_out_enable:
/**
* pm8001_init_ccb_tag - allocate memory to CCB and tag.
* @pm8001_ha: our hba card information.
- * @shost: scsi host which has been allocated outside.
- * @pdev: pci device.
*/
-static int
-pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
- struct pci_dev *pdev)
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha)
{
- int i = 0;
+ struct Scsi_Host *shost = pm8001_ha->shost;
+ struct device *dev = pm8001_ha->dev;
u32 max_out_io, ccb_count;
u32 can_queue;
+ int i;
max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io;
ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io);
@@ -1209,7 +1234,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out_noccb;
}
for (i = 0; i < ccb_count; i++) {
- pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(&pdev->dev,
+ pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev,
sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
&pm8001_ha->ccb_info[i].ccb_dma_handle,
GFP_KERNEL);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 3a863d776724..8e3f2f9ddaac 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -66,7 +66,11 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag)
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
void *bitmap = pm8001_ha->tags;
- clear_bit(tag, bitmap);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
+ __clear_bit(tag, bitmap);
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}
/**
@@ -76,9 +80,9 @@ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
*/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
- unsigned int tag;
void *bitmap = pm8001_ha->tags;
unsigned long flags;
+ unsigned int tag;
spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
@@ -86,7 +90,7 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
return -SAS_QUEUE_FULL;
}
- set_bit(tag, bitmap);
+ __set_bit(tag, bitmap);
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
*tag_out = tag;
return 0;
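
With pm8001_tag_free() now taking bitmap_lock as well, every bitmap access is serialized, so the atomic set_bit()/clear_bit() can be relaxed to the cheaper non-atomic __set_bit()/__clear_bit(). A minimal userspace model of the same alloc/free pair, using a mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>

#define TAGS_NUM 64
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long bitmap[(TAGS_NUM + BITS_PER_WORD - 1) / BITS_PER_WORD];
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

static int tag_alloc(unsigned int *tag_out)
{
	pthread_mutex_lock(&bitmap_lock);
	for (unsigned int t = 0; t < TAGS_NUM; t++) {
		unsigned long *w = &bitmap[t / BITS_PER_WORD];
		unsigned long m = 1UL << (t % BITS_PER_WORD);

		if (!(*w & m)) {	/* find_first_zero_bit() */
			*w |= m;	/* __set_bit(): plain RMW, lock held */
			pthread_mutex_unlock(&bitmap_lock);
			*tag_out = t;
			return 0;
		}
	}
	pthread_mutex_unlock(&bitmap_lock);
	return -1;			/* -SAS_QUEUE_FULL equivalent */
}

static void tag_free(unsigned int tag)
{
	pthread_mutex_lock(&bitmap_lock);
	/* __clear_bit(): also a plain RMW, safe only under the lock */
	bitmap[tag / BITS_PER_WORD] &= ~(1UL << (tag % BITS_PER_WORD));
	pthread_mutex_unlock(&bitmap_lock);
}

int main(void)
{
	unsigned int t;

	if (!tag_alloc(&t)) {
		printf("tag %u\n", t);
		tag_free(t);
	}
	return 0;
}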
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 060ab680a7ed..c5e3f380a01c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -55,6 +55,8 @@
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
@@ -172,6 +174,7 @@ struct forensic_data {
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+ void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha);
int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 303cd05fec50..f8b8624458f7 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -1469,11 +1469,18 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
} else
return -EBUSY;
+ return 0;
+}
+
+static void pm80xx_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
/* send SAS protocol timer configuration page to FW */
- ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+ pm80xx_set_sas_protocol_timer_config(pm8001_ha);
/* Check for encryption */
if (pm8001_ha->chip->encrypt) {
+ int ret;
+
pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
ret = pm80xx_get_encrypt_info(pm8001_ha);
if (ret == -1) {
@@ -1485,7 +1492,6 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
}
}
}
- return 0;
}
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
@@ -4349,6 +4355,29 @@ static int check_enc_sat_cmd(struct sas_task *task)
return ret;
}
+static u32 pm80xx_chip_get_q_index(struct sas_task *task)
+{
+ struct scsi_cmnd *scmd = NULL;
+ u32 blk_tag;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(task->dev)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (!scmd)
+ return 0;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ return blk_mq_unique_tag_to_hwq(blk_tag);
+}
+
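
blk_mq_unique_tag() packs the hardware-queue index into the high 16 bits and the per-queue tag into the low 16 bits, which is why blk_mq_unique_tag_to_hwq() above recovers the queue to post on. A standalone sketch of that encoding; the bit layout mirrors the block layer's, but treat it as illustrative:

#include <stdio.h>
#include <stdint.h>

#define UNIQUE_TAG(hwq, tag)	(((uint32_t)(hwq) << 16) | ((tag) & 0xffffu))
#define UNIQUE_TAG_TO_HWQ(u)	((u) >> 16)
#define UNIQUE_TAG_TO_TAG(u)	((u) & 0xffffu)

int main(void)
{
	uint32_t u = UNIQUE_TAG(3, 42);

	printf("hwq=%u tag=%u\n",
	       UNIQUE_TAG_TO_HWQ(u), UNIQUE_TAG_TO_TAG(u));	/* hwq=3 tag=42 */
	return 0;
}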
/**
* pm80xx_chip_ssp_io_req - send an SSP task to FW
* @pm8001_ha: our hba card information.
@@ -4364,7 +4393,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
u32 tag = ccb->ccb_tag;
u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
- u32 q_index, cpu_id;
+ u32 q_index;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@@ -4385,8 +4414,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
+ q_index = pm80xx_chip_get_q_index(task);
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
@@ -4515,8 +4543,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
struct ata_queued_cmd *qc = task->uldd_task;
- u32 tag = ccb->ccb_tag;
- u32 q_index, cpu_id;
+ u32 tag = ccb->ccb_tag, q_index;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr, end_addr;
@@ -4526,8 +4553,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
+
+ q_index = pm80xx_chip_get_q_index(task);
if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
@@ -5011,6 +5038,7 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
.chip_init = pm80xx_chip_init,
+ .chip_post_init = pm80xx_chip_post_init,
.chip_soft_rst = pm80xx_chip_soft_rst,
.chip_rst = pm80xx_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 83ffba7f51da..cecfb2cb4c7b 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2414,9 +2414,12 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
int rval;
u16 retry = 10;
- if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
- iscsi_host_remove(qedi->shost);
+ if (mode == QEDI_MODE_NORMAL)
+ iscsi_host_remove(qedi->shost, false);
+ else if (mode == QEDI_MODE_SHUTDOWN)
+ iscsi_host_remove(qedi->shost, true);
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
if (qedi->tmf_thread) {
destroy_workqueue(qedi->tmf_thread);
qedi->tmf_thread = NULL;
@@ -2491,7 +2494,7 @@ static void qedi_board_disable_work(struct work_struct *work)
if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
return;
- __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
+ __qedi_remove(qedi->pdev, QEDI_MODE_NORMAL);
}
static void qedi_shutdown(struct pci_dev *pdev)
@@ -2791,7 +2794,7 @@ remove_host:
#ifdef CONFIG_DEBUG_FS
qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
- iscsi_host_remove(qedi->shost);
+ iscsi_host_remove(qedi->shost, false);
stop_iscsi_func:
qedi_ops->stop(qedi->cdev);
stop_slowpath:
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3b3e4234f37a..fa1fcbfb946f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2476,7 +2476,6 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
qla2x00_port_speed_store);
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
-static DEVICE_ATTR_RO(edif_doorbell);
static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version.attr,
@@ -2521,7 +2520,6 @@ static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_port_no.attr,
&dev_attr_fw_attr.attr,
&dev_attr_dport_diagnostics.attr,
- &dev_attr_edif_doorbell.attr,
&dev_attr_mpi_pause.attr,
&dev_attr_qlini_mode.attr,
&dev_attr_ql2xiniexchg.attr,
@@ -2716,17 +2714,27 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- /* Now that the rport has been deleted, set the fcport state to
- FCS_DEVICE_DEAD */
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
+ DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
+ rport->port_state));
+
+ /*
+ * Now that the rport has been deleted, set the fcport state to
+ * FCS_DEVICE_DEAD, if the fcport is still lost.
+ */
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
/*
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
spin_lock_irqsave(host->host_lock, flags);
- fcport->rport = fcport->drport = NULL;
- *((fc_port_t **)rport->dd_data) = NULL;
+ /* Confirm port has not reappeared before clearing pointers. */
+ if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ fcport->rport = fcport->drport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ }
spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
@@ -2759,9 +2767,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
/*
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
+ *
+ * Attempt to clean up only lost devices.
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
- if (IS_FWI2_CAPABLE(fcport->vha->hw)) {
+ if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
+ fcport->scan_state != QLA_FCPORT_FOUND) {
if (fcport->loop_id != FC_NO_LOOP_ID)
fcport->logout_on_delete = 1;
@@ -2771,7 +2782,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
__LINE__);
qlt_schedule_sess_for_deletion(fcport);
}
- } else {
+ } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
qla2x00_port_logout(fcport->vha, fcport);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c2f00f076f79..5db9bf69dcff 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2425,6 +2425,89 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
}
static int
+qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval;
+ struct qla_dport_diag_v2 *dd;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint16_t options;
+
+ if (!IS_DPORT_CAPABLE(vha->hw))
+ return -EPERM;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
+
+ options = dd->options;
+
+ /* Check dport Test in progress */
+ if (options == QLA_GET_DPORT_RESULT_V2 &&
+ vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_IN_PROCESS;
+ goto dportcomplete;
+ }
+
+ /* Check chip reset in progress and start/restart requests arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2)) {
+ vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ }
+
+ /* Check chip reset in progress and get result request arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ options == QLA_GET_DPORT_RESULT_V2) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
+ goto dportcomplete;
+ }
+
+ rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+ if (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2) {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
+ } else if (options == QLA_GET_DPORT_RESULT_V2) {
+ dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
+ dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
+ }
+ } else {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_ERR;
+ }
+
+dportcomplete:
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
+
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ kfree(dd);
+
+ return 0;
+}
+
+static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
@@ -2860,6 +2943,9 @@ qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_j
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_DPORT_DIAGNOSTICS_V2:
+ return qla2x00_do_dport_diagnostics_v2(bsg_job);
+
case QL_VND_EDIF_MGMT:
return qla_edif_app_mgmt(bsg_job);
@@ -2975,6 +3061,13 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
__func__, bsg_job);
+
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x9007,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
+
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -2992,7 +3085,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
sp->u.bsg_job == bsg_job) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
+
+ if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command failed.\n");
bsg_reply->result = -EIO;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 6d2b0a7436c1..bb64b9c5a74b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -37,6 +37,7 @@
#define QL_VND_GET_TGT_STATS 0x25
#define QL_VND_MANAGE_HOST_PORT 0x26
#define QL_VND_MBX_PASSTHRU 0x2B
+#define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -60,6 +61,9 @@
#define EXT_STATUS_TIMEOUT 30
#define EXT_STATUS_THREAD_FAILED 31
#define EXT_STATUS_DATA_CMP_FAILED 32
+#define EXT_STATUS_DPORT_DIAG_ERR 40
+#define EXT_STATUS_DPORT_DIAG_IN_PROCESS 41
+#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42
/* BSG definations for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -288,6 +292,17 @@ struct qla_dport_diag {
uint8_t unused[62];
} __packed;
+#define QLA_GET_DPORT_RESULT_V2 0 /* Get Result */
+#define QLA_RESTART_DPORT_TEST_V2 1 /* Restart test */
+#define QLA_START_DPORT_TEST_V2 2 /* Start test */
+struct qla_dport_diag_v2 {
+ uint16_t options;
+ uint16_t mbx1;
+ uint16_t mbx2;
+ uint8_t unused[58];
+ uint8_t buf[1024]; /* Test Result */
+} __packed;
+
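
The new BSG payload is fixed-size: three u16s, 58 pad bytes, and a 1 KiB result buffer, i.e. 1088 bytes on the wire. A compile-time check of that layout as a userspace sketch, using the GCC/Clang packed attribute in place of the kernel's __packed:

#include <assert.h>
#include <stdint.h>

struct qla_dport_diag_v2 {
	uint16_t options;
	uint16_t mbx1;
	uint16_t mbx2;
	uint8_t  unused[58];
	uint8_t  buf[1024];	/* Test Result */
} __attribute__((packed));

/* 3 * 2 + 58 + 1024 = 1088 bytes */
static_assert(sizeof(struct qla_dport_diag_v2) == 1088, "layout");

int main(void) { return 0; }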
/* D_Port options */
#define QLA_DPORT_RESULT 0x0
#define QLA_DPORT_START 0x2
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f1f6c740bdcd..feeb1666227f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -383,5 +383,5 @@ ql_mask_match(uint level)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
- return (level & ql2xextended_error_logging) == level;
+ return level && ((level & ql2xextended_error_logging) == level);
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e8f69c486be1..3ec6a200942e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -78,7 +78,7 @@ typedef union {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "QLogic Corporation"
+#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -1173,6 +1173,12 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
+
+/* AEN mailbox Port Diagnostics test */
+#define AEN_START_DIAG_TEST 0x0 /* start the diagnostics */
+#define AEN_DONE_DIAG_TEST_WITH_NOERR 0x1 /* Done with no errors */
+#define AEN_DONE_DIAG_TEST_WITH_ERR 0x2 /* Done with error.*/
+
/*
* Firmware options 1, 2, 3.
*/
@@ -2158,6 +2164,11 @@ typedef struct {
#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
failure */
#define CS_REJECT_RECEIVED 0x4E /* Reject received */
+#define CS_EDIF_AUTH_ERROR 0x63 /* decrypt error */
+#define CS_EDIF_PAD_LEN_ERROR 0x65 /* pad > frame size, not 4byte align */
+#define CS_EDIF_INV_REQ 0x66 /* invalid request */
+#define CS_EDIF_SPI_ERROR 0x67 /* rx frame unable to locate sa */
+#define CS_EDIF_HDR_ERROR 0x69 /* data frame != expected len */
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
#define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */
@@ -2626,7 +2637,6 @@ typedef struct fc_port {
struct {
uint32_t enable:1; /* device is edif enabled/req'd */
uint32_t app_stop:2;
- uint32_t app_started:1;
uint32_t aes_gmac:1;
uint32_t app_sess_online:1;
uint32_t tx_sa_set:1;
@@ -2637,6 +2647,7 @@ typedef struct fc_port {
uint32_t rx_rekey_cnt;
uint64_t tx_bytes;
uint64_t rx_bytes;
+ uint8_t sess_down_acked;
uint8_t auth_state;
uint16_t authok:1;
uint16_t rekey_cnt;
@@ -3204,6 +3215,8 @@ struct ct_sns_rsp {
#define GFF_NVME_OFFSET 23 /* type = 28h */
struct {
uint8_t fc4_features[128];
+#define FC4_FF_TARGET BIT_0
+#define FC4_FF_INITIATOR BIT_1
} gff_id;
struct {
uint8_t reserved;
@@ -3975,6 +3988,7 @@ struct qla_hw_data {
/* SRB cache. */
#define SRB_MIN_REQ 128
mempool_t *srb_mempool;
+ u8 port_name[WWN_SIZE];
volatile struct {
uint32_t mbox_int :1;
@@ -4040,6 +4054,9 @@ struct qla_hw_data {
uint32_t n2n_fw_acc_sec:1;
uint32_t plogi_template_valid:1;
uint32_t port_isolated:1;
+ uint32_t eeh_flush:2;
+#define EEH_FLUSH_RDY 1
+#define EEH_FLUSH_DONE 2
} flags;
uint16_t max_exchg;
@@ -4074,6 +4091,7 @@ struct qla_hw_data {
uint32_t rsp_que_len;
uint32_t req_que_off;
uint32_t rsp_que_off;
+ unsigned long eeh_jif;
/* Multi queue data structs */
device_reg_t *mqiobase;
@@ -4256,8 +4274,8 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_BIDI_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
@@ -5012,6 +5030,10 @@ typedef struct scsi_qla_host {
u64 short_link_down_cnt;
struct edif_dbell e_dbell;
struct pur_core pur_cinfo;
+
+#define DPORT_DIAG_IN_PROGRESS BIT_0
+#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1
+ uint16_t dport_status;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -5443,4 +5465,10 @@ struct ql_vnd_tgt_stats_resp {
#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
_fcport->disc_state == DSC_DELETED)
+#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \
+ "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \
+ __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \
+ _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
+ _fp->flags
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index cb8145a9ac09..400a8b6f3982 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -52,6 +52,31 @@ const char *sc_to_str(uint16_t cmd)
return "unknown";
}
+static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *edbnode = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ /* db nodes are fifo - no qualifications done */
+ if (!list_empty(&vha->e_dbell.head)) {
+ edbnode = list_first_entry(&vha->e_dbell.head,
+ struct edb_node, list);
+ list_del_init(&edbnode->list);
+ }
+
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return edbnode;
+}
+
+static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+ list_del_init(&node->list);
+ kfree(node);
+}
+
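
qla_edb_getnext() already unlinks the node with list_del_init(), which leaves the entry pointing at itself, so the second list_del_init() in qla_edb_node_free() is a harmless no-op. A minimal model of that idempotence:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;	/* unlink from neighbours... */
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);		/* ...then point the entry at itself */
}

int main(void)
{
	struct list_head head, node;

	INIT_LIST_HEAD(&head);
	head.next = head.prev = &node;	/* one-element list */
	node.next = node.prev = &head;

	list_del_init(&node);	/* qla_edb_getnext() pops the node */
	list_del_init(&node);	/* qla_edb_node_free(): now a no-op */
	printf("list empty: %d\n", head.next == &head);	/* list empty: 1 */
	return 0;
}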
static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
uint16_t handle)
{
@@ -257,14 +282,8 @@ qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
f = NULL;
list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
- if ((f->flags & FCF_FCSP_DEVICE)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
- "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
- f->node_name, f->port_name,
- f->d_id.b24, id->b24);
- if (f->d_id.b24 == id->b24)
- return f;
- }
+ if (f->d_id.b24 == id->b24)
+ return f;
}
return NULL;
}
@@ -280,14 +299,19 @@ qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
{
/* check that the app is allowed/known to the driver */
- if (appid.app_vid == EDIF_APP_ID) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
- return true;
+ if (appid.app_vid != EDIF_APP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+ __func__, appid.app_vid);
+ return false;
+ }
+
+ if (appid.version != EDIF_VERSION1) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
+ __func__, appid.version);
+ return false;
}
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
- __func__, appid.app_vid);
- return false;
+ return true;
}
static void
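
With the reworked qla_edif_app_check(), a caller is rejected unless both the vendor id and the interface version match, so the user-space authentication application has to fill in the versioned app_id from qla_edif_bsg.h. A hypothetical application-side initializer (sketch; EDIF_APP_ID is assumed to be the vendor id the driver expects):

	struct app_id id = {
		.app_vid = EDIF_APP_ID,		/* known vendor id */
		.version = EDIF_VERSION1,	/* mismatches are now rejected */
	};
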
@@ -486,16 +510,35 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
/* mark doorbell as active since an app is now present */
vha->e_dbell.db_flags |= EDB_ACTIVE;
} else {
- ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
- __func__);
+ goto out;
}
if (N2N_TOPO(vha->hw)) {
- if (vha->hw->flags.n2n_fw_acc_sec)
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- else
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ fcport->n2n_link_reset_cnt = 0;
+
+ if (vha->hw->flags.n2n_fw_acc_sec) {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ qla_edif_sa_ctl_init(vha, fcport);
+
+ /*
+			 * While the authentication app was not running, the remote
+			 * device could still try to log in with this local port.
+			 * Clear the state and try again.
+ */
+ qla2x00_wait_for_sess_deletion(vha);
+
+ /* bounce the link to get the other guy to relogin */
+ if (!vha->hw->flags.n2n_bigger) {
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else {
+ qla2x00_wait_for_hba_online(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ }
} else {
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_edif, vha, 0x2058,
@@ -517,19 +560,31 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_DOWN)
break;
- fcport->edif.app_started = 1;
fcport->login_retry = vha->hw->login_retry_count;
- /* no activity */
fcport->edif.app_stop = 0;
+ fcport->edif.app_sess_online = 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport, true);
+
+ if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
+ continue;
+
+ rval = 0;
ql_dbg(ql_dbg_edif, vha, 0x911e,
"%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
__func__, fcport->port_name);
- fcport->edif.app_sess_online = 0;
qlt_schedule_sess_for_deletion(fcport);
qla_edif_sa_ctl_init(vha, fcport);
}
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
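
The loop above narrows authentication setup to ports that actually expose a target function: ports not marked QLA_FCPORT_FOUND are skipped, an unknown port type triggers a synchronous GFF_ID query, and anything without a target FC-4 feature or port type is passed over. The resulting predicate, pulled out as a sketch (flag names taken from the driver):

/* Sketch: does this port advertise a SCSI or NVMe target function? */
static bool edif_port_is_target(const fc_port_t *fp)
{
	return (fp->fc4_features & FC4_FF_TARGET) ||
	       (fp->port_type & (FCT_TARGET | FCT_NVME_TARGET));
}
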
@@ -540,9 +595,11 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
__func__);
}
+out:
appreply.host_support_edif = vha->hw->flags.edif_enabled;
appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
appreply.edif_edb_active = vha->e_dbell.db_flags;
+ appreply.version = EDIF_VERSION1;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
@@ -610,9 +667,6 @@ qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fcport->send_els_logo = 1;
qlt_schedule_sess_for_deletion(fcport);
-
- /* qla_edif_flush_sa_ctl_lists(fcport); */
- fcport->edif.app_started = 0;
}
}
@@ -672,6 +726,7 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
portid.b.area = appplogiok.u.d_id.b.area;
portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
+ appplogireply.version = EDIF_VERSION1;
switch (appplogiok.type) {
case PL_TYPE_WWPN:
fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -864,6 +919,8 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (!(fcport->flags & FCF_FCSP_DEVICE))
continue;
@@ -880,9 +937,25 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
continue;
- app_reply->ports[pcnt].rekey_count =
- fcport->edif.rekey_cnt;
+ if (!N2N_TOPO(vha->hw)) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport,
+ true);
+ if (!rval &&
+ !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type &
+ (FCT_TARGET | FCT_NVME_TARGET)))
+ continue;
+ }
+
+ rval = 0;
+
+ app_reply->ports[pcnt].version = EDIF_VERSION1;
app_reply->ports[pcnt].remote_type =
VND_CMD_RTYPE_UNKNOWN;
if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
@@ -979,6 +1052,8 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
if (pcnt > app_req.num_ports)
@@ -1012,6 +1087,164 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
return rval;
}
+static int32_t
+qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_port *fcport;
+ struct aen_complete_cmd ack;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
+
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: %06x event_code %x\n",
+ __func__, ack.port_id.b24, ack.event_code);
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+		    "%s: unable to find fcport %06x\n",
+ __func__, ack.port_id.b24);
+ return 0;
+ }
+
+ switch (ack.event_code) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ fcport->edif.sess_down_acked = 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ u32 sg_skip, reply_payload_len;
+ bool keep;
+ struct edb_node *dbnode = NULL;
+ struct edif_app_dbell ap;
+ int dat_size = 0;
+
+ sg_skip = 0;
+ reply_payload_len = bsg_job->reply_payload.payload_len;
+
+ while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
+ dbnode = qla_edb_getnext(vha);
+ if (dbnode) {
+ keep = true;
+ dat_size = 0;
+ ap.event_code = dbnode->ntype;
+ switch (dbnode->ntype) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ case VND_CMD_AUTH_STATE_NEEDED:
+ ap.port_id = dbnode->u.plogi_did;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ ap.port_id = dbnode->u.els_sid;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ ap.port_id = dbnode->u.sa_aen.port_id;
+ memcpy(&ap.event_data, &dbnode->u,
+ sizeof(struct edif_sa_update_aen));
+ dat_size += sizeof(struct edif_sa_update_aen);
+ break;
+ default:
+ keep = false;
+ ql_log(ql_log_warn, vha, 0x09102,
+ "%s unknown DB type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+ break;
+ }
+ ap.event_data_size = dat_size;
+			/* 8 = sizeof(ap.event_code) + sizeof(ap.event_data_size) */
+ dat_size += 8;
+ if (keep)
+ sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &ap, dat_size, sg_skip, false);
+
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell consumed : type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+
+ kfree(dbnode);
+ } else {
+ break;
+ }
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ bsg_reply->reply_payload_rcv_len = sg_skip;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ return 0;
+}
+
+static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+ u32 delay)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ /* small sleep for doorbell events to accumulate */
+ if (delay)
+ msleep(delay);
+
+ qla_edif_consume_dbell(vha, bsg_job);
+
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+}
+
+static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct bsg_job *prev_bsg_job = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (vha->e_dbell.dbell_bsg_job) {
+ prev_bsg_job = vha->e_dbell.dbell_bsg_job;
+ vha->e_dbell.dbell_bsg_job = NULL;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (prev_bsg_job)
+ __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
+}
+
+static int
+qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ unsigned long flags;
+ bool return_bsg = false;
+
+ /* flush previous dbell bsg */
+ qla_edif_dbell_bsg_done(vha);
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
+ /*
+		 * When the next db event happens, the bsg_job will be
+		 * returned. Otherwise, the timer will return it.
+ */
+ vha->e_dbell.dbell_bsg_job = bsg_job;
+ vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
+ } else {
+ return_bsg = true;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (return_bsg)
+ __qla_edif_dbell_bsg_done(vha, bsg_job, 1);
+
+ return 0;
+}
+
int32_t
qla_edif_app_mgmt(struct bsg_job *bsg_job)
{
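
Taken together, the functions above implement a long-poll that replaces the old sysfs doorbell: qla_edif_dbell_bsg() parks the READ_DBELL job under db_lock when the queue is empty, and whichever wakeup path fires first — a new event from qla_edb_eventcreate(), the 10-second expiry in qla_edif_timer(), or teardown in qla_edb_stop() — completes it exactly once. The single-shot guarantee comes from swapping the pointer to NULL under the lock, roughly (kernel-style sketch; edif_dbell fields as in qla_edif.h):

#include <linux/bsg-lib.h>
#include <linux/spinlock.h>

/* Claim the parked job; only one caller can win the race. */
static struct bsg_job *dbell_claim_job(struct edif_dbell *db)
{
	struct bsg_job *job;
	unsigned long flags;

	spin_lock_irqsave(&db->db_lock, flags);
	job = db->dbell_bsg_job;
	db->dbell_bsg_job = NULL;	/* losers see NULL and do nothing */
	spin_unlock_irqrestore(&db->db_lock, flags);

	return job;
}
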
@@ -1023,8 +1256,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
bool done = true;
int32_t rval = 0;
uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+ u32 level = ql_dbg_edif;
+
+ /* doorbell is high traffic */
+ if (vnd_sc == QL_VND_SC_READ_DBELL)
+ level = 0;
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
+ ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
__func__, vnd_sc);
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
@@ -1033,7 +1271,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
if (!vha->hw->flags.edif_enabled ||
test_bit(VPORT_DELETE, &vha->dpc_flags)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
__func__, bsg_job, vha->dpc_flags);
@@ -1042,7 +1280,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
}
if (!qla_edif_app_check(vha, appcheck)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s app checked failed.\n",
__func__);
@@ -1074,6 +1312,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
case QL_VND_SC_GET_STATS:
rval = qla_edif_app_getstats(vha, bsg_job);
break;
+ case QL_VND_SC_AEN_COMPLETE:
+ rval = qla_edif_ack(vha, bsg_job);
+ break;
+ case QL_VND_SC_READ_DBELL:
+ rval = qla_edif_dbell_bsg(vha, bsg_job);
+ done = false;
+ break;
default:
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
__func__,
@@ -1085,7 +1330,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
done:
if (done) {
- ql_dbg(ql_dbg_user, vha, 0x7009,
+ ql_dbg(level, vha, 0x7009,
"%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
@@ -1247,6 +1492,8 @@ qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
+#define EDIF_MSLEEP_INTERVAL 100
+#define EDIF_RETRY_COUNT 50
int
qla24xx_sadb_update(struct bsg_job *bsg_job)
@@ -1259,7 +1506,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
struct edif_list_entry *edif_entry = NULL;
int found = 0;
int rval = 0;
- int result = 0;
+ int result = 0, cnt;
struct qla_sa_update_frame sa_frame;
struct srb_iocb *iocb_cmd;
port_id_t portid;
@@ -1500,11 +1747,23 @@ force_rx_delete:
sp->done = qla2x00_bsg_job_done;
iocb_cmd = &sp->u.iocb_cmd;
iocb_cmd->u.sa_update.sa_frame = sa_frame;
-
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
ql_log(ql_dbg_edif, vha, 0x70e3,
- "qla2x00_start_sp failed=%d.\n", rval);
+ "%s qla2x00_start_sp failed=%d.\n",
+ __func__, rval);
qla2x00_rel_sp(sp);
rval = -EIO;
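
EDIF_MSLEEP_INTERVAL and EDIF_RETRY_COUNT above bound the new retry loop: when qla2x00_start_sp() reports EAGAIN (queue momentarily full), the request is retried every 100 ms for up to 50 attempts — roughly five seconds — before being failed. The skeleton in isolation (sketch; start_fn stands in for qla2x00_start_sp()):

#include <linux/delay.h>
#include <linux/errno.h>

#define EDIF_MSLEEP_INTERVAL	100	/* ms between attempts */
#define EDIF_RETRY_COUNT	50	/* ~5 s worst case */

static int start_with_retry(int (*start_fn)(void *), void *arg)
{
	int cnt, rval;

	for (cnt = 0; cnt < EDIF_RETRY_COUNT; cnt++) {
		rval = start_fn(arg);
		if (rval != EAGAIN)
			return rval;	/* success or a hard failure */
		msleep(EDIF_MSLEEP_INTERVAL);
	}

	return rval;	/* still EAGAIN after all retries: give up */
}
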
@@ -1797,30 +2056,6 @@ qla_edb_init(scsi_qla_host_t *vha)
/* initialize lock which protects doorbell & init list */
spin_lock_init(&vha->e_dbell.db_lock);
INIT_LIST_HEAD(&vha->e_dbell.head);
-
- /* create and initialize doorbell */
- init_completion(&vha->e_dbell.dbell);
-}
-
-static void
-qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
-{
- /*
- * releases the space held by this edb node entry
- * this function does _not_ free the edb node itself
- * NB: the edb node entry passed should not be on any list
- *
- * currently for doorbell there's no additional cleanup
- * needed, but here as a placeholder for furture use.
- */
-
- if (!node) {
- ql_dbg(ql_dbg_edif, vha, 0x09122,
- "%s error - no valid node passed\n", __func__);
- return;
- }
-
- node->ntype = N_UNDEF;
}
static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
@@ -1867,11 +2102,8 @@ static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- list_for_each_entry_safe(e, tmp, &edb_list, list) {
+ list_for_each_entry_safe(e, tmp, &edb_list, list)
qla_edb_node_free(vha, e);
- list_del_init(&e->list);
- kfree(e);
- }
}
/* function called when app is stopping */
@@ -1899,14 +2131,10 @@ qla_edb_stop(scsi_qla_host_t *vha)
"%s freeing edb_node type=%x\n",
__func__, node->ntype);
qla_edb_node_free(vha, node);
- list_del(&node->list);
-
- kfree(node);
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* wake up doorbell waiters - they'll be dismissed with error code */
- complete_all(&vha->e_dbell.dbell);
+ qla_edif_dbell_bsg_done(vha);
}
static struct edb_node *
@@ -1944,9 +2172,6 @@ qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
list_add_tail(&ptr->list, &vha->e_dbell.head);
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* ring doorbell for waiters */
- complete(&vha->e_dbell.dbell);
-
return true;
}
@@ -2010,47 +2235,29 @@ qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
edbnode->u.sa_aen.port_id = fcport->d_id;
edbnode->u.sa_aen.status = data;
edbnode->u.sa_aen.key_type = data2;
+ edbnode->u.sa_aen.version = EDIF_VERSION1;
break;
default:
ql_dbg(ql_dbg_edif, vha, 0x09102,
"%s unknown type: %x\n", __func__, dbtype);
- qla_edb_node_free(vha, edbnode);
kfree(edbnode);
edbnode = NULL;
break;
}
- if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
+ if (edbnode) {
+ if (!qla_edb_node_add(vha, edbnode)) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to add dbnode\n", __func__);
+ kfree(edbnode);
+ return;
+ }
ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s unable to add dbnode\n", __func__);
- qla_edb_node_free(vha, edbnode);
- kfree(edbnode);
- return;
- }
- if (edbnode && fcport)
- fcport->edif.auth_state = dbtype;
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
-}
-
-static struct edb_node *
-qla_edb_getnext(scsi_qla_host_t *vha)
-{
- unsigned long flags;
- struct edb_node *edbnode = NULL;
-
- spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
-
- /* db nodes are fifo - no qualifications done */
- if (!list_empty(&vha->e_dbell.head)) {
- edbnode = list_first_entry(&vha->e_dbell.head,
- struct edb_node, list);
- list_del(&edbnode->list);
+ "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+ qla_edif_dbell_bsg_done(vha);
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
}
-
- spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
-
- return edbnode;
}
void
@@ -2078,89 +2285,14 @@ qla_edif_timer(scsi_qla_host_t *vha)
ha->edif_post_stop_cnt_down = 60;
}
}
-}
-/*
- * app uses separate thread to read this. It'll wait until the doorbell
- * is rung by the driver or the max wait time has expired
- */
-ssize_t
-edif_doorbell_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- struct edb_node *dbnode = NULL;
- struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
- uint32_t dat_siz, buf_size, sz;
-
- /* TODO: app currently hardcoded to 256. Will transition to bsg */
- sz = 256;
-
- /* stop new threads from waiting if we're not init'd */
- if (DBELL_INACTIVE(vha)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif db not enabled\n", __func__);
- return 0;
- }
-
- if (!vha->hw->flags.edif_enabled) {
- /* edif not enabled */
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif not enabled\n", __func__);
- return -1;
- }
-
- buf_size = 0;
- while ((sz - buf_size) >= sizeof(struct edb_node)) {
- /* remove the next item from the doorbell list */
- dat_siz = 0;
- dbnode = qla_edb_getnext(vha);
- if (dbnode) {
- ap->event_code = dbnode->ntype;
- switch (dbnode->ntype) {
- case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
- case VND_CMD_AUTH_STATE_NEEDED:
- ap->port_id = dbnode->u.plogi_did;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_ELS_RCVD:
- ap->port_id = dbnode->u.els_sid;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
- ap->port_id = dbnode->u.sa_aen.port_id;
- memcpy(ap->event_data, &dbnode->u,
- sizeof(struct edif_sa_update_aen));
- dat_siz += sizeof(struct edif_sa_update_aen);
- break;
- default:
- /* unknown node type, rtn unknown ntype */
- ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
- memcpy(ap->event_data, &dbnode->ntype, 4);
- dat_siz += 4;
- break;
- }
-
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell consumed : type=%d %p\n",
- __func__, dbnode->ntype, dbnode);
- /* we're done with the db node, so free it up */
- qla_edb_node_free(vha, dbnode);
- kfree(dbnode);
- } else {
- break;
- }
-
- ap->event_data_size = dat_siz;
- /* 8bytes = ap->event_code + ap->event_data_size */
- buf_size += dat_siz + 8;
- ap = (struct edif_app_dbell *)(buf + buf_size);
- }
- return buf_size;
+ if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
+ qla_edif_dbell_bsg_done(vha);
}
static void qla_noop_sp_done(srb_t *sp, int res)
{
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
@@ -2185,7 +2317,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sa_ctl) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"sa_ctl allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport = sa_ctl->fcport;
@@ -2195,7 +2328,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sp) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"SRB allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport->flags |= FCF_ASYNC_SENT;
@@ -2224,10 +2358,17 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- rval = QLA_FUNCTION_FAILED;
+ if (rval != QLA_SUCCESS) {
+ goto done_free_sp;
+ }
return rval;
+done_free_sp:
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
}
void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
@@ -2446,8 +2587,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
- if (DBELL_INACTIVE(vha) ||
- (fcport && EDIF_SESSION_DOWN(fcport))) {
+ if (DBELL_INACTIVE(vha)) {
ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
__func__, host->e_dbell.db_flags,
fcport ? fcport->d_id.b24 : 0);
@@ -2457,6 +2597,22 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
return;
}
+ if (fcport && EDIF_SESSION_DOWN(fcport)) {
+ ql_dbg(ql_dbg_edif, host, 0x13b6,
+ "%s terminate exchange. Send logo to 0x%x\n",
+ __func__, a.did.b24);
+
+ a.tx_byte_count = a.tx_len = 0;
+ a.tx_addr = 0;
+ a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+		/* send logo to let the remote port know to tear down the session */
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
/* add the local enode to the list */
qla_enode_add(host, ptr);
@@ -2832,6 +2988,12 @@ qla28xx_start_scsi_edif(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword(req->req_q_out);
@@ -3023,6 +3185,7 @@ queuing_error:
mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
sp->u.scmd.ct6_ctx = NULL;
}
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(lock, flags);
return QLA_FUNCTION_FAILED;
@@ -3349,10 +3512,14 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fc_port_t *fcport = NULL;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- int rval = (DID_ERROR << 16);
+ int rval = (DID_ERROR << 16), cnt;
port_id_t d_id;
struct qla_bsg_auth_els_request *p =
(struct qla_bsg_auth_els_request *)bsg_job->request;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ rpl->version = EDIF_VERSION1;
d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
@@ -3371,7 +3538,7 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (qla_bsg_check(vha, bsg_job, fcport))
return 0;
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ if (EDIF_SESS_DELETE(fcport)) {
ql_dbg(ql_dbg_edif, vha, 0x910d,
"%s ELS code %x, no loop id.\n", __func__,
bsg_request->rqst_data.r_els.els_code);
@@ -3440,17 +3607,26 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
-
- ql_dbg(ql_dbg_edif, vha, 0x700a,
- "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
- __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
- p->e.extra_rx_xchg_address, p->e.extra_control_flags,
- sp->handle, sp->remap.req.len, bsg_job);
-
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_edif, vha, 0x700a,
+ "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ sp->handle, sp->remap.req.len, bsg_job);
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+ fallthrough;
+ default:
ql_log(ql_log_warn, vha, 0x700e,
- "qla2x00_start_sp failed = %d\n", rval);
+ "%s qla2x00_start_sp failed = %d\n", __func__, rval);
SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
rval = -EIO;
goto done_free_remap_rsp;
@@ -3472,14 +3648,29 @@ done:
void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
{
+ u16 cnt = 0;
+
if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
ql_dbg(ql_dbg_disc, vha, 0xf09c,
"%s: sess %8phN send port_offline event\n",
__func__, sess->port_name);
sess->edif.app_sess_online = 0;
+ sess->edif.sess_down_acked = 0;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
sess->d_id.b24, 0, sess);
qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+
+ while (!READ_ONCE(sess->edif.sess_down_acked) &&
+ !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ msleep(100);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+ sess->edif.sess_down_acked = 0;
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN port_offline event completed\n",
+ __func__, sess->port_name);
}
}
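
qla_edif_sess_down() now holds off session teardown until the application acknowledges the SESSION_SHUTDOWN doorbell (sess_down_acked, set by qla_edif_ack()), polling every 100 ms and bailing out after about ten seconds or once the vport is being deleted. That bounded-poll idiom, reduced to a sketch (VPORT_DELETE is the driver's dpc flag):

#include <linux/bitops.h>
#include <linux/delay.h>

/* Poll *acked every 100 ms; give up after ~10 s or when told to stop. */
static void wait_for_ack(const u8 *acked, const unsigned long *dpc_flags)
{
	u16 cnt = 0;

	while (!READ_ONCE(*acked) && !test_bit(VPORT_DELETE, dpc_flags)) {
		msleep(100);
		if (++cnt > 100)
			break;
	}
}
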
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index a965ca8e47ce..7cdb89ccdc6e 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -51,7 +51,8 @@ struct edif_dbell {
enum db_flags_t db_flags;
spinlock_t db_lock;
struct list_head head;
- struct completion dbell;
+ struct bsg_job *dbell_bsg_job;
+ unsigned long bsg_expire;
};
#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */
@@ -140,4 +141,8 @@ struct enode {
(DBELL_ACTIVE(_fcport->vha) && \
(_fcport->disc_state == DSC_LOGIN_AUTH_PEND))
+#define EDIF_SESS_DELETE(_s) \
+ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+ _s->disc_state == DSC_DELETED))
+
#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 5a26c77157da..0931f4e4e127 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -7,13 +7,15 @@
#ifndef __QLA_EDIF_BSG_H
#define __QLA_EDIF_BSG_H
+#define EDIF_VERSION1 1
+
/* BSG Vendor specific commands */
#define ELS_MAX_PAYLOAD 2112
#ifndef WWN_SIZE
#define WWN_SIZE 8
#endif
-#define VND_CMD_APP_RESERVED_SIZE 32
-
+#define VND_CMD_APP_RESERVED_SIZE 28
+#define VND_CMD_PAD_SIZE 3
enum auth_els_sub_cmd {
SEND_ELS = 0,
SEND_ELS_REPLY,
@@ -28,7 +30,9 @@ struct extra_auth_els {
#define BSG_CTL_FLAG_LS_ACC 1
#define BSG_CTL_FLAG_LS_RJT 2
#define BSG_CTL_FLAG_TRM 3
- uint8_t extra_rsvd[3];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct qla_bsg_auth_els_request {
@@ -39,51 +43,46 @@ struct qla_bsg_auth_els_request {
struct qla_bsg_auth_els_reply {
struct fc_bsg_reply r;
uint32_t rx_xchg_address;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
};
struct app_id {
int app_vid;
- uint8_t app_key[32];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_start_reply {
uint32_t host_support_edif;
uint32_t edif_enode_active;
uint32_t edif_edb_active;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_start {
struct app_id app_info;
- uint32_t prli_to;
- uint32_t key_shred;
uint8_t app_start_flags;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE - 1];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_stop {
struct app_id app_info;
- char buf[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_plogi_reply {
uint32_t prli_status;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
-} __packed;
-
-#define RECFG_TIME 1
-#define RECFG_BYTES 2
-
-struct app_rekey_cfg {
- struct app_id app_info;
- uint8_t rekey_mode;
- port_id_t d_id;
- uint8_t force;
- union {
- int64_t bytes;
- int64_t time;
- } rky_units;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -91,7 +90,9 @@ struct app_pinfo_req {
struct app_id app_info;
uint8_t num_ports;
port_id_t remote_pid;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_pinfo {
@@ -103,11 +104,8 @@ struct app_pinfo {
#define VND_CMD_RTYPE_INITIATOR 2
uint8_t remote_state;
uint8_t auth_state;
- uint8_t rekey_mode;
- int64_t rekey_count;
- int64_t rekey_config_value;
- int64_t rekey_consumed_value;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -120,6 +118,8 @@ struct app_pinfo {
struct app_pinfo_reply {
uint8_t port_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
struct app_pinfo ports[];
} __packed;
@@ -127,6 +127,8 @@ struct app_pinfo_reply {
struct app_sinfo_req {
struct app_id app_info;
uint8_t num_ports;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -140,6 +142,9 @@ struct app_sinfo {
struct app_stats_reply {
uint8_t elem_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
struct app_sinfo elem[];
} __packed;
@@ -163,9 +168,11 @@ struct qla_sa_update_frame {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t port_id;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved2[VND_CMD_APP_RESERVED_SIZE];
} __packed;
-// used for edif mgmt bsg interface
#define QL_VND_SC_UNDEF 0
#define QL_VND_SC_SA_UPDATE 1
#define QL_VND_SC_APP_START 2
@@ -175,6 +182,22 @@ struct qla_sa_update_frame {
#define QL_VND_SC_REKEY_CONFIG 6
#define QL_VND_SC_GET_FCINFO 7
#define QL_VND_SC_GET_STATS 8
+#define QL_VND_SC_AEN_COMPLETE 9
+#define QL_VND_SC_READ_DBELL 10
+
+/*
+ * The bsg caller provides an empty buffer for doorbell events.
+ *
+ * sg_io_v4.din_xferp = empty buffer for doorbell events
+ * sg_io_v4.dout_xferp = struct edif_read_dbell *buf
+ */
+struct edif_read_dbell {
+ struct app_id app_info;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
+
/* Application interface data structure for rtn data */
#define EXT_DEF_EVENT_DATA_SIZE 64
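
Every reshuffled bsg structure in this header now ends with the same tail: a one-byte version, VND_CMD_PAD_SIZE (3) explicit pad bytes, and a VND_CMD_APP_RESERVED_SIZE (28) reserved area, all under __packed so the user/kernel wire layout is unambiguous. A compile-time guard for that convention could look like (sketch; the struct name is hypothetical):

#include <linux/build_bug.h>

struct edif_versioned_tail {
	uint8_t version;
	uint8_t pad[VND_CMD_PAD_SIZE];
	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;

/* 1 + 3 + 28: the tail must stay exactly 32 bytes on the wire. */
static_assert(sizeof(struct edif_versioned_tail) == 32);
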
@@ -191,7 +214,9 @@ struct edif_sa_update_aen {
port_id_t port_id;
uint32_t key_type; /* Tx (1) or RX (2) */
	uint32_t status;	/* 0 success, 1 failed, 2 timeout, 3 error */
- uint8_t reserved[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define QL_VND_SA_STAT_SUCCESS 0
@@ -212,9 +237,22 @@ struct auth_complete_cmd {
uint8_t wwpn[WWN_SIZE];
port_id_t d_id;
} u;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct aen_complete_cmd {
+ struct app_id app_info;
+ port_id_t port_id;
+ uint32_t event_code;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define RX_DELAY_DELETE_TIMEOUT 20
+#define FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN 1
+
#endif /* QLA_EDIF_BSG_H */
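
struct aen_complete_cmd is the payload behind the new QL_VND_SC_AEN_COMPLETE subcommand: the application echoes back the port id and event code of a consumed doorbell event, and qla_edif_ack() uses it to set sess_down_acked for SESSION_SHUTDOWN. A hypothetical application-side fill-in (event_port_id is assumed to come from the consumed event):

	struct aen_complete_cmd ack = {
		.app_info   = { .app_vid = EDIF_APP_ID,
				.version = EDIF_VERSION1 },
		.port_id    = event_port_id,
		.event_code = VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
		.version    = EDIF_VERSION1,
	};
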
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 0bb1d562f0bf..361015b5763e 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -807,7 +807,7 @@ struct els_entry_24xx {
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
#define EPD_ELS_RJT (2 << 13)
-#define EPD_RX_XCHG (3 << 13)
+#define EPD_RX_XCHG (3 << 13) /* terminate exchange */
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
#define ECF_SEC_LOGIN BIT_3
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index dac27b5ff0ac..5dd2932382ee 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,8 @@ extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
extern u32 ql2xnvme_queues;
+extern int ql2xrspq_follow_inptr;
+extern int ql2xrspq_follow_inptr_legacy;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -335,6 +337,7 @@ extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
struct qla_work_evt *e);
void qla2x00_sp_release(struct kref *kref);
+void qla2x00_els_dcmd2_iocb_timeout(void *data);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -433,7 +436,8 @@ extern int
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map,
+ u8 *num_entries);
extern int
qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
@@ -554,6 +558,10 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+extern int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *,
+ struct qla_dport_diag_v2 *, mbx_cmd_t *);
+
int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
@@ -727,7 +735,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
@@ -989,7 +997,6 @@ fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
fc_port_t *fcport);
void qla_edb_stop(scsi_qla_host_t *vha);
-ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf);
int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
void qla_enode_init(scsi_qla_host_t *vha);
void qla_enode_stop(scsi_qla_host_t *vha);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index e811de2f6a25..64ab070b8716 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1596,7 +1596,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
struct ct_fdmi_hba_attr *eiter;
uint16_t alen;
@@ -1617,7 +1616,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
alen = scnprintf(
eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
+ "%s", QLA2XXX_MANUFACTURER);
alen += FDMI_ATTR_ALIGNMENT(alen);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1758,8 +1757,8 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
+
alen = sizeof(eiter->a.max_ct_len);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1851,7 +1850,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
char *hostname = p_sysid ?
p_sysid->nodename : fc_host_system_hostname(vha->host);
@@ -1903,8 +1901,7 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
alen = sizeof(eiter->a.max_frame_size);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -3280,19 +3277,12 @@ done:
return rval;
}
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- qla24xx_post_gnl_work(vha, fcport);
-}
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
- struct event_arg ea;
uint8_t fc4_scsi_feat;
uint8_t fc4_nvme_feat;
@@ -3300,10 +3290,10 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
"Async done-%s res %x ID %x. %8phC\n",
sp->name, res, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
- ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp;
fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ sp->rc = res;
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
@@ -3324,24 +3314,42 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
}
}
- memset(&ea, 0, sizeof(ea));
- ea.sp = sp;
- ea.fcport = sp->fcport;
- ea.rc = res;
+ if (sp->flags & SRB_WAKEUP_ON_COMP) {
+ complete(sp->comp);
+ } else {
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
- qla24xx_handle_gffid_event(vha, &ea);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /* we should not be here */
+ dump_stack();
+ }
}
/* Get FC4 Feature with Nport ID. */
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
srb_t *sp;
+ DECLARE_COMPLETION_ONSTACK(comp);
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ /* this routine does not have handling for no wait */
+ if (!vha->flags.online || !wait)
return rval;
/* ref: INIT */
@@ -3349,43 +3357,86 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!sp)
return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gffid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
qla24xx_async_gffid_sp_done);
+ sp->comp = &comp;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
+
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
+
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
/* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
- GFF_ID_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE);
ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- ql_dbg(ql_dbg_disc, vha, 0x2132,
- "Async-%s hdl=%x %8phC.\n", sp->name,
- sp->handle, fcport->port_name);
-
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
goto done_free_sp;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "Async-%s hdl=%x portid %06x\n",
+ sp->name, sp->handle, fcport->d_id.b24);
+ }
+
+ wait_for_completion(sp->comp);
+ rval = sp->rc;
- return rval;
done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
- fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
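
qla24xx_async_gffid() is now synchronous in practice: it allocates per-call DMA request/response buffers instead of borrowing fcport->ct_desc, flags the srb with SRB_WAKEUP_ON_COMP, and sleeps on an on-stack completion that the done callback signals, after which the caller frees the coherent buffers itself. The wait half of that pattern, boiled down (kernel-style sketch, generic names; sp->rc is where the done routine stashes the result):

#include <linux/completion.h>

static int issue_and_wait(srb_t *sp, int (*start)(srb_t *))
{
	DECLARE_COMPLETION_ONSTACK(comp);

	sp->comp = &comp;		/* the done() callback completes this */
	if (start(sp) != QLA_SUCCESS)
		return QLA_FUNCTION_FAILED;

	wait_for_completion(&comp);	/* sleeps: process context only */

	return sp->rc;
}
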
@@ -3578,7 +3629,7 @@ login_logout:
do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
- fcport->logout_on_delete = 0;
+ continue;
ql_log(ql_log_warn, vha, 0x20f0,
"%s %d %8phC post del sess\n",
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f3417a3e891..e7fe0e52c11d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -47,6 +47,7 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
+ scsi_qla_host_t *vha = sp->vha;
WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
@@ -54,6 +55,12 @@ qla2x00_sp_timeout(struct timer_list *t)
/* ref: TMR */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+
+ if (vha && qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9008,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
}
void qla2x00_sp_free(srb_t *sp)
@@ -161,6 +168,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
/* ref: INIT for ABTS command */
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
@@ -168,6 +176,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
if (!sp)
return QLA_MEMORY_ALLOC_FAILED;
+ QLA_VHA_MARK_BUSY(vha, bail);
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
@@ -1480,7 +1489,6 @@ static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
ql_dbg(ql_dbg_disc, vha, 0x20ef,
"%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
__func__, __LINE__, fcport->port_name);
- fcport->edif.app_started = 1;
fcport->edif.app_sess_online = 1;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
@@ -1763,8 +1771,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_PEND:
- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ if (vha->hw->flags.edif_enabled)
+ break;
+
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post %s PRLI\n",
+ __func__, __LINE__, fcport->port_name,
+ NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
qla24xx_post_prli_work(vha, fcport);
+ }
break;
case DSC_UPD_FCPORT:
@@ -1818,7 +1834,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, vha, 0x2115,
"Delaying session delete for FCP2 portid=%06x %8phC ",
fcport->d_id.b24, fcport->port_name);
@@ -1850,7 +1867,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_AREA_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
@@ -1861,7 +1879,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_DOM_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
@@ -1873,7 +1892,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_FAB_ADDR:
default:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
fcport->scan_needed = 1;
@@ -2000,12 +2020,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
/* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ QLA_VHA_MARK_BUSY(vha, bail);
sp->type = SRB_TM_CMD;
sp->name = "tmf";
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
@@ -2124,6 +2146,13 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
}
if (N2N_TOPO(vha->hw)) {
+ if (ea->fcport->n2n_link_reset_cnt ==
+ vha->hw->login_retry_count &&
+ ea->fcport->flags & FCF_FCSP_DEVICE) {
+ /* remote authentication app just started */
+ ea->fcport->n2n_link_reset_cnt = 0;
+ }
+
if (ea->fcport->n2n_link_reset_cnt <
vha->hw->login_retry_count) {
ea->fcport->n2n_link_reset_cnt++;
@@ -4509,6 +4538,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+ /* Init_cb will be reused for other command(s). Save a backup copy of port_name */
+ memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
}
/* ELS pass through payload is limit by frame size. */
@@ -5273,9 +5304,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
- if (vha->e_dbell.db_flags == EDB_ACTIVE)
- fcport->edif.app_started = 1;
-
spin_lock_init(&fcport->edif.indx_list_lock);
INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
@@ -5488,6 +5516,22 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
+static void
+qla_reinitialize_link(scsi_qla_host_t *vha)
+{
+ int rval;
+
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ rval = qla2x00_full_login_lip(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xd051,
+ "Link reinitialization failed (%d)\n", rval);
+ }
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5539,6 +5583,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&vha->work_lock, flags);
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ u8 loop_map_entries = 0;
+ int rc;
+
+ rc = qla2x00_get_fcal_position_map(vha, NULL,
+ &loop_map_entries);
+ if (rc == QLA_SUCCESS && loop_map_entries > 1) {
+ /*
+ * There are devices that are still not logged
+ * in. Reinitialize to give them a chance.
+ */
+ qla_reinitialize_link(vha);
+ return QLA_FUNCTION_FAILED;
+ }
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5767,8 +5824,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (atomic_read(&fcport->state) == FCS_ONLINE)
return;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -5869,7 +5924,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_reg_remote_port(vha, fcport);
break;
case MODE_TARGET:
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (!vha->vha_tgt.qla_tgt->tgt_stop &&
!vha->vha_tgt.qla_tgt->tgt_stopped)
qlt_fc_port_added(vha, fcport);
@@ -5887,6 +5941,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (NVME_TARGET(vha->hw, fcport))
qla_nvme_register_remote(vha, fcport);
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
@@ -7197,6 +7253,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
+
if (vha->hw->flags.port_isolated)
return status;
@@ -9657,6 +9716,12 @@ int qla2xxx_disable_port(struct Scsi_Host *host)
vha->hw->flags.port_isolated = 1;
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9006,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
if (qla2x00_chip_is_down(vha))
return 0;
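
Both qla2xxx_disable_port() and qla2xxx_enable_port() now check for a dead PCI link before touching the chip: qla2x00_isp_reg_stat() relies on the fact that MMIO reads from a disconnected device return all-ones, and qla_pci_set_eeh_busy() then parks the driver for EEH recovery. The detection idea as a hedged sketch (assuming a status register that can never legitimately read 0xffffffff):

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: all-ones from a status register implies the PCI device
 * has dropped off the bus (reads of a dead device float high). */
static bool isp_reg_disconnected(void __iomem *status_reg)
{
	return readl(status_reg) == 0xffffffff;
}
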
@@ -9672,6 +9737,13 @@ int qla2xxx_enable_port(struct Scsi_Host *host)
{
scsi_qla_host_t *vha = shost_priv(host);
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9001,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+
vha->hw->flags.port_isolated = 0;
/* Set the flag to 1, so that isp_abort can proceed */
vha->flags.online = 1;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e0fe9ddb4bd2..42ce4e1fe744 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2819,7 +2819,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->vha->qla_stats.control_requests++;
}
-static void
+void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
@@ -2882,6 +2882,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
+	/* For edif, set logout on delete to ensure any residual key from FW is flushed. */
+ fcport->logout_on_delete = 1;
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 21b31d6359c8..76e79f350a22 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1354,9 +1354,7 @@ skip_rio:
if (!vha->vp_idx) {
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
- void *wwpn = ha->init_cb->port_name;
-
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ memcpy(vha->port_name, ha->port_name, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
@@ -1761,6 +1759,9 @@ global_port_update:
break;
case MBA_DPORT_DIAGNOSTICS:
+ if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
+ (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
ql_dbg(ql_dbg_async, vha, 0x5052,
"D-Port Diagnostics: %04x %04x %04x %04x\n",
mb[0], mb[1], mb[2], mb[3]);
@@ -2245,9 +2246,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
res = DID_ERROR << 16;
}
- if (logit) {
- if (sp->remap.remapped &&
- ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (logit) {
ql_dbg(ql_dbg_user, vha, 0x503f,
"%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
type, sp->handle, comp_status);
@@ -2259,18 +2260,24 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
pkt)->total_byte_count),
e->s_id[0], e->s_id[2], e->s_id[1],
e->d_id[2], e->d_id[1], e->d_id[0]);
- } else {
- ql_log(ql_log_info, vha, 0x503f,
- "%s IOCB Done hdl=%x comp_status=0x%x\n",
- type, sp->handle, comp_status);
- ql_log(ql_log_info, vha, 0x503f,
- "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
- fw_status[1], fw_status[2],
- le32_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count),
- e->s_id[0], e->s_id[2], e->s_id[1],
- e->d_id[2], e->d_id[1], e->d_id[0]);
}
+ if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s rcv reject. Sched delete\n", __func__);
+ qlt_schedule_sess_for_deletion(sp->fcport);
+ }
+ } else if (logit) {
+ ql_log(ql_log_info, vha, 0x503f,
+ "%s IOCB Done hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+ ql_log(ql_log_info, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
}
}
goto els_ct_done;
@@ -2639,7 +2646,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (unlikely(logit))
- ql_log(ql_dbg_io, fcport->vha, 0x5060,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3426,6 +3433,7 @@ check_scsi_status:
case CS_PORT_UNAVAILABLE:
case CS_TIMEOUT:
case CS_RESET:
+ case CS_EDIF_INV_REQ:
/*
* We are going to have the fc class block the rport
@@ -3496,7 +3504,7 @@ check_scsi_status:
out:
if (logit)
- ql_log(ql_dbg_io, fcport->vha, 0x3022,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
@@ -3712,12 +3720,11 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
* Return: 0 all iocbs has arrived, xx- all iocbs have not arrived.
*/
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
- struct rsp_que *rsp, response_t *pkt)
+ struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
{
- int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
- response_t *end_pkt;
+ int start_pkt_ring_index;
+ u32 iocb_cnt = 0;
int rc = 0;
- u32 rsp_q_in;
if (pkt->entry_count == 1)
return rc;
@@ -3728,34 +3735,18 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
else
start_pkt_ring_index = rsp->ring_index - 1;
- if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
- rsp->length - 1;
+ if (rsp_q_in < start_pkt_ring_index)
+ /* q in ptr is wrapped */
+ iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
else
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
+ iocb_cnt = rsp_q_in - start_pkt_ring_index;
- end_pkt = rsp->ring + end_pkt_ring_index;
-
- /* next pkt = end_pkt + 1 */
- n_ring_index = end_pkt_ring_index + 1;
- if (n_ring_index >= rsp->length)
- n_ring_index = 0;
-
- rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
- rd_reg_dword(rsp->rsp_q_in);
-
- /* rsp_q_in is either wrapped or pointing beyond endpkt */
- if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
- rsp_q_in >= n_ring_index)
- /* all IOCBs arrived. */
- rc = 0;
- else
+ if (iocb_cnt < pkt->entry_count)
rc = -EIO;
- ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
- "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
- __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
- rsp_q_in, rc);
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
+ __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
return rc;
}
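
The rewritten qla_chk_cont_iocb_avail() replaces the old start/end/next-index juggling with one modular distance: the number of entries that have landed is the wrap-aware gap from the packet's first slot to the producer index rsp_q_in, and a multi-entry IOCB is deferred when that gap is smaller than entry_count. The distance computation on its own, as a runnable sketch:

#include <stdio.h>

/* Entries available in a ring of `len` slots, from `start` (inclusive)
 * up to the producer index `in` (exclusive), handling wrap-around. */
static unsigned int ring_avail(unsigned int start, unsigned int in,
			       unsigned int len)
{
	return (in >= start) ? in - start : len - start + in;
}

int main(void)
{
	/* 32-slot ring, producer wrapped past the consumer's start. */
	printf("%u\n", ring_avail(30, 2, 32));	/* prints 4 */
	return 0;
}
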
@@ -3772,6 +3763,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ u16 rsp_in = 0, cur_ring_index;
+ int follow_inptr, is_shadow_hba;
if (!ha->flags.fw_started)
return;
@@ -3781,8 +3774,27 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla_cpu_update(rsp->qpair, smp_processor_id());
}
- while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+#define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \
+ do { \
+ if (_update) { \
+ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
+ rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
+ } \
+ } while (0)
+
+ is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
+ follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr :
+ ql2xrspq_follow_inptr_legacy;
+
+ __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in);
+
+ while ((likely(follow_inptr &&
+ rsp->ring_index != rsp_in &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) ||
+ (!follow_inptr &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
+ cur_ring_index = rsp->ring_index;
rsp->ring_index++;
if (rsp->ring_index == rsp->length) {
@@ -3894,6 +3906,8 @@ process_err:
}
pure_item = qla27xx_copy_fpin_pkt(vha,
(void **)&pkt, &rsp);
+ __update_rsp_in(follow_inptr, is_shadow_hba,
+ rsp, rsp_in);
if (!pure_item)
break;
qla24xx_queue_purex_item(vha, pure_item,
@@ -3901,7 +3915,17 @@ process_err:
break;
case ELS_AUTH_ELS:
- if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ /*
+ * ring_ptr and ring_index were
+ * pre-incremented above. Reset them
+ * back to current. Wait for next
+ * interrupt with all IOCBs to arrive
+ * and re-process.
+ */
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
ql_dbg(ql_dbg_init, vha, 0x5091,
"Defer processing ELS opcode %#x...\n",
purex_entry->els_frame_payload[3]);
@@ -4420,16 +4444,12 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
- } else
- if (ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
+ if (IS_MQUE_CAPABLE(ha) &&
+ (ha->msixbase && ha->mqiobase && ha->max_qpairs))
+ ha->mqenable = 1;
+ else
+ ha->mqenable = 0;
+
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
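A note on the arithmetic above: the reworked qla_chk_cont_iocb_avail() drops the old end-pointer bookkeeping and instead counts, with one wrap-aware subtraction, how many entries sit between the start of a multi-IOCB packet and the firmware's RSP IN pointer (now passed in by the caller, which follows it when ql2xrspq_follow_inptr is set). A minimal userspace sketch of that computation, with hypothetical names standing in for rsp->length, start_pkt_ring_index and rsp_q_in:

#include <stdint.h>
#include <stdio.h>

/* Count response-ring entries in [start, q_in), handling wrap-around. */
static uint32_t iocbs_arrived(uint32_t ring_len, uint32_t start, uint32_t q_in)
{
	if (q_in < start)		/* IN pointer wrapped past the end */
		return ring_len - start + q_in;
	return q_in - start;
}

int main(void)
{
	/* 8-entry ring, packet starts at 6, IN pointer wrapped to 1:
	 * entries 6, 7 and 0 have arrived. */
	printf("%u\n", iocbs_arrived(8, 6, 1));	/* 3 */
	/* No wrap: start 2, IN pointer 5 -> entries 2, 3, 4. */
	printf("%u\n", iocbs_arrived(8, 2, 5));	/* 3 */
	return 0;
}

If the count is smaller than pkt->entry_count, the driver returns -EIO and defers the packet until the next interrupt.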
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 892caf2475df..359595a64664 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -238,6 +238,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
wrt_reg_word(optr, *iptr);
+ } else {
+ wrt_reg_word(optr, 0);
}
mboxes >>= 1;
@@ -274,6 +276,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -286,12 +294,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_ABORTED;
goto premature_exit;
}
- ql_dbg(ql_dbg_mbx, vha, 0x117a,
- "cmd=%x Timeout.\n", command);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -3066,7 +3068,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+ u8 *num_entries)
{
int rval;
mbx_cmd_t mc;
@@ -3106,6 +3109,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ if (num_entries)
+ *num_entries = pmap[0];
}
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
@@ -6471,6 +6476,54 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
+ struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
+{
+ int rval;
+ dma_addr_t dd_dma;
+ uint size = sizeof(dd->buf);
+ uint16_t options = dd->options;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+ "Entered %s.\n", __func__);
+
+ dd_dma = dma_map_single(&vha->hw->pdev->dev,
+ dd->buf, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
+ ql_log(ql_log_warn, vha, 0x1194,
+ "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(dd->buf, 0, size);
+
+ mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(dd_dma));
+ mcp->mb[3] = LSW(LSD(dd_dma));
+ mcp->mb[6] = MSW(MSD(dd_dma));
+ mcp->mb[7] = LSW(MSD(dd_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS * 4;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
+
+ return rval;
+}
+
static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
sp->u.iocb_cmd.u.mbx.rc = res;
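The new qla26xx_dport_diagnostics_v2() follows the standard streaming-DMA lifecycle: map the caller's buffer, bail out if the mapping failed, hand the bus address to the firmware, then unmap unconditionally. A kernel-style sketch of that shape, where issue_command() is a hypothetical stand-in for the mailbox submission:

static int dma_in_example(struct device *dev, void *buf, size_t size)
{
	int rc;
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;		/* nothing mapped, nothing to undo */

	rc = issue_command(dma, size);	/* hypothetical firmware submission */

	/* Unmap even on failure so the mapping is never leaked. */
	dma_unmap_single(dev, dma, size, DMA_FROM_DEVICE);

	return rc;
}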
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 346d47b61c07..16a9f22bb860 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -166,9 +166,13 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
int ret = QLA_SUCCESS;
fc_port_t *fcport;
- if (vha->hw->flags.edif_enabled)
+ if (vha->hw->flags.edif_enabled) {
+ if (DBELL_ACTIVE(vha))
+ qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
+ FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
/* delete sessions and flush sa_indexes */
qla2x00_wait_for_sess_deletion(vha);
+ }
if (vha->hw->flags.fw_started)
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 87c9404aa401..7450c3458be7 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -37,11 +37,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- return 0;
-
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 73073fb08369..0bd0fd1042df 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -333,6 +333,21 @@ MODULE_PARM_DESC(ql2xabts_wait_nvme,
"To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
+u32 ql2xdelay_before_pci_error_handling = 5;
+module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
+MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
+ "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
+
+int ql2xrspq_follow_inptr = 1;
+module_param(ql2xrspq_follow_inptr, int, 0644);
+MODULE_PARM_DESC(ql2xrspq_follow_inptr,
+ "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1).");
+
+int ql2xrspq_follow_inptr_legacy = 1;
+module_param(ql2xrspq_follow_inptr_legacy, int, 0644);
+MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
+ "Follow RSP IN pointer for RSP updates for HBAs older than 27XX. (default: 1).");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
@@ -1337,21 +1352,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
-int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
- uint64_t l, enum nexus_wait_type type)
+static int
+__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
{
int cnt, match, status;
unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
+ scsi_qla_host_t *vha = qpair->vha;
+ struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = vha->req;
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (cnt = 1; status == QLA_SUCCESS &&
cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
@@ -1378,12 +1392,32 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
if (!match)
continue;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
status = qla2x00_eh_wait_on_command(cmd);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ return status;
+}
+
+int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
+{
+ struct qla_qpair *qpair;
+ struct qla_hw_data *ha = vha->hw;
+ int i, status = QLA_SUCCESS;
+ status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
+ type);
+ for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
+ qpair = ha->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
+ type);
+ }
return status;
}
@@ -1420,7 +1454,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
@@ -1488,7 +1522,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
@@ -5472,7 +5506,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
e->u.fcport.fcport, false);
break;
case QLA_EVT_SA_REPLACE:
- qla24xx_issue_sa_replace_iocb(vha, e);
+ rc = qla24xx_issue_sa_replace_iocb(vha, e);
break;
}
@@ -7238,6 +7272,44 @@ static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
}
}
+static void qla_wind_down_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->flags.eeh_busy)
+ return;
+ if (ha->pci_error_state)
+ /* system is trying to recover */
+ return;
+
+ /*
+	 * The system is not currently handling the PCIe error. At this
+	 * point, make a best-effort attempt to wind down the adapter.
+ */
+ if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
+ !ha->flags.eeh_flush) {
+ ql_log(ql_log_info, vha, 0x9009,
+ "PCI Error detected, attempting to reset hardware.\n");
+
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->disable_intrs(ha);
+
+ ha->flags.eeh_flush = EEH_FLUSH_RDY;
+ ha->eeh_jif = jiffies;
+
+ } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
+ time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) {
+ pci_clear_master(ha->pdev);
+
+		/* flush all commands */
+ qla2x00_abort_isp_cleanup(vha);
+ ha->flags.eeh_flush = EEH_FLUSH_DONE;
+
+ ql_log(ql_log_info, vha, 0x900a,
+ "PCI Error handling complete, all IOs aborted.\n");
+ }
+}
+
/**************************************************************************
* qla2x00_timer
*
@@ -7261,6 +7333,8 @@ qla2x00_timer(struct timer_list *t)
fc_port_t *fcport = NULL;
if (ha->flags.eeh_busy) {
+ qla_wind_down_chip(vha);
+
ql_dbg(ql_dbg_timer, vha, 0x6000,
"EEH = %d, restarting timer.\n",
ha->flags.eeh_busy);
@@ -7841,6 +7915,9 @@ void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
spin_lock_irqsave(&base_vha->work_lock, flags);
if (!ha->flags.eeh_busy) {
+ ha->eeh_jif = jiffies;
+ ha->flags.eeh_flush = 0;
+
ha->flags.eeh_busy = 1;
do_cleanup = true;
}
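qla_wind_down_chip() is a two-stage, timer-driven fallback for the case where the OS never begins PCI error recovery: after ql2xdelay_before_pci_error_handling seconds it resets the chip and disables interrupts, and five seconds after that it clears bus mastering and aborts all outstanding I/O. A userspace model of that timeline (all names are hypothetical; now stands in for jiffies and delay_secs for the module parameter):

#include <stdbool.h>
#include <stdio.h>

enum flush_state { FLUSH_NONE, FLUSH_RDY, FLUSH_DONE };

struct hba_model {
	bool eeh_busy;		/* models ha->flags.eeh_busy */
	bool os_recovering;	/* models ha->pci_error_state */
	long stamp;		/* models ha->eeh_jif */
	enum flush_state flush;
};

static void wind_down(struct hba_model *ha, long now, long delay_secs)
{
	if (!ha->eeh_busy || ha->os_recovering)
		return;			/* healthy, or the OS owns recovery */

	if (ha->flush == FLUSH_NONE && now - ha->stamp >= delay_secs) {
		puts("stage 1: reset chip, disable interrupts");
		ha->flush = FLUSH_RDY;
		ha->stamp = now;	/* restart the clock for stage 2 */
	} else if (ha->flush == FLUSH_RDY && now - ha->stamp >= 5) {
		puts("stage 2: clear bus mastering, abort all I/O");
		ha->flush = FLUSH_DONE;
	}
}

int main(void)
{
	struct hba_model ha = { .eeh_busy = true };

	for (long t = 0; t <= 12; t++)	/* one timer tick per second */
		wind_down(&ha, t, 5);	/* stage 1 at t=5, stage 2 at t=10 */
	return 0;
}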
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index cb97f625970d..62666df1a59e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -981,22 +981,6 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
- if (ha->flags.edif_enabled &&
- (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
- sess->edif.authok = 0;
- if (!ha->flags.host_shutting_down) {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
- __func__, sess->port_name);
- qla2x00_release_all_sadb(vha, sess);
- } else {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s bypassing release_all_sadb\n",
- __func__);
- }
- qla_edif_clear_appdata(vha, sess);
- qla_edif_sess_down(vha, sess);
- }
qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
@@ -1042,6 +1026,25 @@ void qlt_free_session_done(struct work_struct *work)
sess->nvme_flag |= NVME_FLAG_DELETING;
qla_nvme_unregister_remote_port(sess);
}
+
+ if (ha->flags.edif_enabled &&
+ (!own || (own &&
+ own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ sess->edif.authok = 0;
+ if (!ha->flags.host_shutting_down) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+ __func__, sess->port_name);
+ qla2x00_release_all_sadb(vha, sess);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s bypassing release_all_sadb\n",
+ __func__);
+ }
+
+ qla_edif_clear_appdata(vha, sess);
+ qla_edif_sess_down(vha, sess);
+ }
}
/*
@@ -6932,14 +6935,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b09d7d2080c0..f3257d46b6d2 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.400-k"
+#define QLA2XXX_VERSION "10.02.07.800-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 400
+#define QLA_DRIVER_BETA_VER 800
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c59eac7a32f2..086ec5b5862d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -586,10 +586,13 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- struct module *mod = sdev->host->hostt->module;
-
+ /*
+ * Decreasing the module reference count before the device reference
+ * count is safe since scsi_remove_host() only returns after all
+ * devices have been removed.
+ */
+ module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
- module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b776cefc7cda..448748e3fba5 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -463,14 +463,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
scsi_report_lun_change(sdev);
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
"LUN assignments on this target have "
"changed. The Linux SCSI layer does not "
"automatically remap LUN assignments.\n");
} else if (sshdr->asc == 0x3f)
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
- "operating parameters on this target have "
+ "Operating parameters on this target have "
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 17a617db9ae0..ef08029a0079 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,13 +75,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
return ret;
}
-/*
- * When to reinvoke queueing after a resource shortage. It's 3 msecs to
- * not change behaviour from the previous unplug mechanism, experimentation
- * may prove this needs changing.
- */
-#define SCSI_QUEUE_DELAY 3
-
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
@@ -118,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -128,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(rq, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}
/**
@@ -658,14 +656,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
return bytes;
}
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
-{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
-}
-
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
struct request *req = scsi_cmd_to_rq(cmd);
@@ -683,14 +673,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
return false;
}
+/*
+ * When the ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep by
+ * 1 sec to avoid aggressive retries of a target still in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -779,8 +776,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_TRANSPORT;
- fallthrough;
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
@@ -839,7 +836,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
return;
fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -933,7 +933,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* command block will be released and the queue function will be goosed. If we
* are not done then we have to figure out what to do next:
*
- * a) We can call scsi_io_completion_reprep(). The request will be
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
* unprepared and put back on the queue. Then a new command will
* be created for it. This should be used if we made forward
* progress, or if we want to switch from READ(10) to READ(6) for
@@ -949,7 +949,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
@@ -986,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* request just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
@@ -1549,7 +1548,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
scsi_init_command(sdev, cmd);
cmd->eh_eflags = 0;
- cmd->allowed = 0;
cmd->prot_type = 0;
cmd->prot_flags = 0;
cmd->submitter = 0;
@@ -1600,6 +1598,8 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
return ret;
}
+ /* Usually overridden by the ULP */
+ cmd->allowed = 0;
memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
@@ -1648,6 +1648,13 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
sbitmap_put(&sdev->budget_map, budget_token);
}
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY 3
+
static int scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
@@ -1876,10 +1883,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- if (dev->dma_mask) {
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) >> SECTOR_SHIFT);
- }
blk_queue_max_hw_sectors(q, shost->max_sectors);
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
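The scsi_mq_requeue_cmd() change is the mechanism behind the new ACTION_DELAYED_REPREP: requeue the request without kicking the dispatch list, then schedule a delayed kick, so a target reporting an ALUA state transition is not hammered with immediate retries. A minimal sketch of the pattern using the existing blk-mq API:

#include <linux/blk-mq.h>

/* Requeue @rq, optionally delaying its redispatch by @msecs milliseconds. */
static void requeue_after(struct request *rq, unsigned long msecs)
{
	if (msecs) {
		blk_mq_requeue_request(rq, false);	/* park, don't kick */
		blk_mq_delay_kick_requeue_list(rq->q, msecs);
	} else {
		blk_mq_requeue_request(rq, true);	/* requeue and kick */
	}
}

scsi_io_completion_action() invokes this with ALUA_TRANSITION_REPREP_DELAY (1000 ms) for the 0x0a ASCQ case and with 0 everywhere else.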
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 91ac901a6682..ac6059702d13 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -406,9 +406,14 @@ static void scsi_target_destroy(struct scsi_target *starget)
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
+ struct Scsi_Host *shost = dev_to_shost(parent);
struct scsi_target *starget = to_scsi_target(dev);
kfree(starget);
+
+ if (atomic_dec_return(&shost->target_count) == 0)
+ wake_up(&shost->targets_wq);
+
put_device(parent);
}
@@ -521,6 +526,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
+ init_waitqueue_head(&starget->sdev_wq);
+
+ atomic_inc(&shost->target_count);
+
retry:
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index aa70d9282161..9dad2fd5297f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -443,18 +443,15 @@ static void scsi_device_cls_release(struct device *class_dev)
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
- struct scsi_device *sdev;
+ struct scsi_device *sdev = container_of(work, struct scsi_device,
+ ew.work);
+ struct scsi_target *starget = sdev->sdev_target;
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
- struct module *mod;
-
- sdev = container_of(work, struct scsi_device, ew.work);
-
- mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
@@ -516,19 +513,16 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(sdev->inquiry);
kfree(sdev);
+ if (starget && atomic_dec_return(&starget->sdev_count) == 0)
+ wake_up(&starget->sdev_wq);
+
if (parent)
put_device(parent);
- module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
-
- /* Set module pointer as NULL in case of module unloading */
- if (!try_module_get(sdp->host->hostt->module))
- sdp->host->hostt->module = NULL;
-
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -1535,6 +1529,14 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+	 * After scsi_remove_target() returns, its caller can remove resources
+	 * associated with @starget, e.g. an rport or session. Wait until all
+	 * devices associated with @starget have been removed to prevent a SCSI
+	 * error handling callback function from triggering a use-after-free.
+ */
+ wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
}
/**
@@ -1645,6 +1647,9 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ atomic_inc(&starget->sdev_count);
+
/*
* device can now only be removed via __scsi_remove_device() so hold
* the target. Target will be held in CREATED state until something
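Taken together, the scsi_scan.c and scsi_sysfs.c hunks introduce a count-and-wait teardown pattern: each release path drops an atomic counter and wakes a waitqueue when it reaches zero, while the remover blocks until that happens, closing the use-after-free window described in the comment above. A kernel-style sketch of the pattern with illustrative names:

struct tracked {
	atomic_t count;			/* e.g. starget->sdev_count */
	wait_queue_head_t idle_wq;	/* e.g. starget->sdev_wq */
};

static void tracked_get(struct tracked *t)
{
	atomic_inc(&t->count);
}

static void tracked_put(struct tracked *t)
{
	/* dec_return: exactly one caller sees 0 and wakes the waiters */
	if (atomic_dec_return(&t->count) == 0)
		wake_up(&t->idle_wq);
}

static void tracked_wait_idle(struct tracked *t)
{
	wait_event(t->idle_wq, atomic_read(&t->count) == 0);
}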
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5d21f07456c6..cd3db9684e52 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1980,7 +1980,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
scsi_remove_target(&session->dev);
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, target_id);
+ ida_free(&iscsi_sess_ida, target_id);
unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
@@ -2049,7 +2049,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
return -ENOMEM;
if (target_id == ISCSI_MAX_TARGET) {
- id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL);
if (id < 0) {
iscsi_cls_session_printk(KERN_ERR, session,
@@ -2088,7 +2088,7 @@ release_dev:
device_del(&session->dev);
release_ida:
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, session->target_id);
+ ida_free(&iscsi_sess_ida, session->target_id);
destroy_wq:
destroy_workqueue(session->workq);
return err;
@@ -2143,8 +2143,6 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
return 0;
iscsi_remove_conn(iscsi_dev_to_conn(dev));
- iscsi_put_conn(iscsi_dev_to_conn(dev));
-
return 0;
}
@@ -2264,18 +2262,20 @@ static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
}
}
-static int iscsi_if_stop_conn(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
{
- int flag = ev->u.stop_conn.flag;
- struct iscsi_cls_conn *conn;
-
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (!conn)
- return -EINVAL;
-
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
/*
+	 * For offload, iscsid may not know about the ep, e.g. when iscsid
+	 * has been restarted, or, for kernel-based session shutdown, iscsid
+	 * may not even be running. In these cases, do the disconnect now.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* If this is a termination we have to call stop_conn with that flag
* so the correct states get set. If we haven't run the work yet try to
* avoid the extra run.
@@ -2285,16 +2285,6 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
iscsi_stop_conn(conn, flag);
} else {
/*
- * For offload, when iscsid is restarted it won't know about
- * existing endpoints so it can't do a ep_disconnect. We clean
- * it up here for userspace.
- */
- mutex_lock(&conn->ep_mutex);
- if (conn->ep)
- iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
- mutex_unlock(&conn->ep_mutex);
-
- /*
* Figure out if it was the kernel or userspace initiating this.
*/
spin_lock_irq(&conn->lock);
@@ -2349,6 +2339,55 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
}
+static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data)
+{
+ struct iscsi_transport *transport;
+ struct iscsi_cls_conn *conn;
+
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+
+ conn = iscsi_dev_to_conn(dev);
+ transport = conn->transport;
+
+ if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN)
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
+
+ transport->destroy_conn(conn);
+ return 0;
+}
+
+/**
+ * iscsi_force_destroy_session - destroy a session from the kernel
+ * @session: session to destroy
+ *
+ * Force the destruction of a session from the kernel. This should only be
+ * used when userspace is no longer running during system shutdown.
+ */
+void iscsi_force_destroy_session(struct iscsi_cls_session *session)
+{
+ struct iscsi_transport *transport = session->transport;
+ unsigned long flags;
+
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING);
+
+ spin_lock_irqsave(&sesslock, flags);
+ if (list_empty(&session->sess_list)) {
+ spin_unlock_irqrestore(&sesslock, flags);
+ /*
+		 * The conn/ep is already freed and the session is being torn
+		 * down via the async path; shutdown need not handle it, so return.
+ */
+ return;
+ }
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ device_for_each_child(&session->dev, NULL,
+ iscsi_iter_force_destroy_conn_fn);
+ transport->destroy_session(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_force_destroy_session);
+
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -3720,7 +3759,12 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_DESTROY_CONN:
return iscsi_if_destroy_conn(transport, ev);
case ISCSI_UEVENT_STOP_CONN:
- return iscsi_if_stop_conn(transport, ev);
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid,
+ ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
}
/*
@@ -4812,7 +4856,7 @@ free_priv:
}
EXPORT_SYMBOL_GPL(iscsi_register_transport);
-int iscsi_unregister_transport(struct iscsi_transport *tt)
+void iscsi_unregister_transport(struct iscsi_transport *tt)
{
struct iscsi_internal *priv;
unsigned long flags;
@@ -4835,8 +4879,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
device_unregister(&priv->dev);
mutex_unlock(&rx_queue_mutex);
-
- return 0;
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
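The ida_simple_get()/ida_simple_remove() pairs above are converted to the plain ida_alloc()/ida_free() API; with a minimum of 0 and no upper bound, the old and new calls are equivalent. A minimal sketch, assuming an IDA used the way iscsi_sess_ida is:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int grab_id(void)
{
	return ida_alloc(&example_ida, GFP_KERNEL);	/* < 0 on failure */
}

static void drop_id(int id)
{
	ida_free(&example_ida, id);
}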
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 12bff64dade6..2f88c61216ee 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -225,6 +225,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct device *dma_dev = shost->dma_dev;
INIT_LIST_HEAD(&sas_host->rphy_list);
mutex_init(&sas_host->lock);
@@ -236,6 +237,11 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
shost->host_no);
+ if (dma_dev->dma_mask) {
+ shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
return 0;
}
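The sas_host_setup() addition derives an IOMMU-friendly optimal transfer size from dma_opt_mapping_size(); the sd.c hunk below then folds it into the default max_sectors with min_not_zero(), so a host that publishes no limit (opt_sectors == 0) is unaffected. A sketch of the derivation as a standalone helper (the helper name is hypothetical):

static unsigned int derive_opt_sectors(struct device *dma_dev,
				       unsigned int max_sectors)
{
	if (!dma_dev->dma_mask)
		return 0;	/* no DMA: publish no optimal limit */

	return min_t(unsigned int, max_sectors,
		     dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
}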
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index eb02d939dd44..eb76ba055021 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3296,6 +3296,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
(sector_t)BLK_DEF_MAX_SECTORS);
}
+ /*
+	 * Limit the default to the SCSI host's optimal sector limit, if set.
+	 * Performance may suffer when the size of a request exceeds this host
+	 * limit.
+ */
+ rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
+
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 118c7b4a8af2..340b050ad28d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -195,7 +195,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ -444,6 +444,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr;
int retval;
@@ -466,20 +467,16 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if (retval)
return retval;
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- if (retval)
- /* -ERESTARTSYS as signal hit process */
- return retval;
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp)
+ /* signal or detaching */
+ return retval ? retval : -ENODEV;
}
if (srp->header.interface_id != '\0')
return sg_new_read(sfp, buf, count, srp);
@@ -940,9 +937,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ -2079,19 +2074,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
}
static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2145,6 +2149,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+ * If the device is detaching, wakeup any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
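The reworked sg_get_rq_mark() distinguishes three request states so that a detaching device only ends a blocked read once no matching request is still in flight: done == 0 marks the fd busy, done == 1 is claimed by bumping it to 2, and done == 2 already belongs to another reader. A simplified userspace model of that scan (locking and the sg_io_owned filter are omitted):

#include <stdbool.h>
#include <stddef.h>

struct req {
	int done;		/* 0 = active, 1 = ready, 2 = being returned */
	int pack_id;
	struct req *next;
};

static struct req *claim_ready(struct req *list, int pack_id, bool *busy)
{
	*busy = false;
	for (struct req *r = list; r; r = r->next) {
		if (pack_id != -1 && r->pack_id != pack_id)
			continue;
		switch (r->done) {
		case 0:			/* still in flight */
			*busy = true;
			break;
		case 1:			/* ready: claim it for this reader */
			r->done = 2;
			return r;
		case 2:			/* another reader owns it */
			break;
		}
	}
	return NULL;
}

With *busy reported back, sg_read()'s wait condition can keep sleeping through a detach while a response is still owed, and sg_remove_request() wakes the readers once the last one is gone.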
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 6f83e2df4d64..973d240649ab 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 2e40320129c0..e550b12e525a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -293,7 +293,8 @@ struct pqi_raid_path_request {
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
u8 cdb[16];
- u8 reserved6[12];
+ u8 reserved6[11];
+ u8 ml_device_lun_number;
__le32 timeout;
struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -467,7 +468,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[2];
+ u8 reserved;
+ u8 ml_device_lun_number;
__le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
@@ -708,6 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
+#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -863,7 +866,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16
#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 18
+#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 21
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -1081,6 +1085,8 @@ struct pqi_stream_data {
u32 last_accessed;
};
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
@@ -1124,6 +1130,7 @@ struct pqi_scsi_dev {
u8 phy_id;
u8 ncq_prio_enable;
u8 ncq_prio_support;
+ u8 multi_lun_device_lun_count;
bool raid_bypass_configured; /* RAID bypass configured */
bool raid_bypass_enabled; /* RAID bypass enabled */
u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
@@ -1139,7 +1146,7 @@ struct pqi_scsi_dev {
struct list_head delete_list_entry;
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
- atomic_t scsi_cmds_outstanding;
+ atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
atomic_t raid_bypass_cnt;
};
@@ -1262,6 +1269,12 @@ struct pqi_event {
#define PQI_CTRL_PRODUCT_REVISION_A 0
#define PQI_CTRL_PRODUCT_REVISION_B 1
+enum pqi_ctrl_removal_state {
+ PQI_CTRL_PRESENT = 0,
+ PQI_CTRL_GRACEFUL_REMOVAL,
+ PQI_CTRL_SURPRISE_REMOVAL
+};
+
struct pqi_ctrl_info {
unsigned int ctrl_id;
struct pci_dev *pci_dev;
@@ -1332,12 +1345,13 @@ struct pqi_ctrl_info {
u8 tmf_iu_timeout_supported : 1;
u8 firmware_triage_supported : 1;
u8 rpl_extended_format_4_5_supported : 1;
+ u8 multi_lun_device_supported : 1;
u8 enable_r1_writes : 1;
u8 enable_r5_writes : 1;
u8 enable_r6_writes : 1;
u8 lv_drive_type_mix_valid : 1;
u8 enable_stream_detection : 1;
-
+ u8 disable_managed_interrupts : 1;
u8 ciss_report_log_flags;
u32 max_transfer_encrypted_sas_sata;
u32 max_transfer_encrypted_nvme;
@@ -1381,6 +1395,7 @@ struct pqi_ctrl_info {
struct work_struct ofa_quiesce_work;
u32 ofa_bytes_requested;
u16 ofa_cancel_reason;
+ enum pqi_ctrl_removal_state ctrl_removal_state;
};
enum pqi_ctrl_mode {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 7c0d069a3158..7a8c2c75acba 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.14-035"
+#define DRIVER_VERSION "2.1.18-045"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 14
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 18
+#define DRIVER_REVISION 45
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -94,7 +94,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs);
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -174,6 +175,18 @@ module_param_named(hide_vsep,
pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
+static int pqi_disable_managed_interrupts;
+module_param_named(disable_managed_interrupts,
+ pqi_disable_managed_interrupts, int, 0644);
+MODULE_PARM_DESC(disable_managed_interrupts,
+ "Disable the kernel automatically assigning SMP affinity to IRQs.");
+
+static unsigned int pqi_ctrl_ready_timeout_secs;
+module_param_named(ctrl_ready_timeout,
+ pqi_ctrl_ready_timeout_secs, uint, 0644);
+MODULE_PARM_DESC(ctrl_ready_timeout,
+ "Timeout in seconds for driver to wait for controller ready.");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
@@ -1597,7 +1610,9 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
&id_phys->alternate_paths_phys_connector,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
-
+ device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
+ if (!device->multi_lun_device_lun_count)
+ device->multi_lun_device_lun_count = 1;
if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
id_phys->phy_count)
device->phy_id =
@@ -1880,15 +1895,18 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
+ int lun;
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun,
- atomic_read(&device->scsi_cmds_outstanding));
+ for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, lun,
+ atomic_read(&device->scsi_cmds_outstanding[lun]));
+ }
if (pqi_is_logical_device(device))
scsi_remove_device(device->sdev);
@@ -2020,6 +2038,23 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
+static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
+{
+ u32 raid_map1_size;
+ u32 raid_map2_size;
+
+ if (raid_map1 == NULL || raid_map2 == NULL)
+ return raid_map1 == raid_map2;
+
+ raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
+ raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
+
+ if (raid_map1_size != raid_map2_size)
+ return false;
+
+ return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
+}
+
/* Assumes the SCSI device list lock is held. */
static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
@@ -2033,49 +2068,51 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
existing_device->target_lun_valid = true;
}
- if (pqi_is_logical_device(existing_device) &&
- ctrl_info->logical_volume_rescan_needed)
- existing_device->rescan = true;
-
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->is_external_raid_device =
- new_device->is_external_raid_device;
- existing_device->is_expander_smp_device =
- new_device->is_expander_smp_device;
- existing_device->aio_enabled = new_device->aio_enabled;
- memcpy(existing_device->vendor, new_device->vendor,
- sizeof(existing_device->vendor));
- memcpy(existing_device->model, new_device->model,
- sizeof(existing_device->model));
+ memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
+ memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
existing_device->sas_address = new_device->sas_address;
- existing_device->raid_level = new_device->raid_level;
existing_device->queue_depth = new_device->queue_depth;
- existing_device->aio_handle = new_device->aio_handle;
- existing_device->volume_status = new_device->volume_status;
- existing_device->active_path_index = new_device->active_path_index;
- existing_device->phy_id = new_device->phy_id;
- existing_device->path_map = new_device->path_map;
- existing_device->bay = new_device->bay;
- existing_device->box_index = new_device->box_index;
- existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
- existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
- memcpy(existing_device->box, new_device->box,
- sizeof(existing_device->box));
- memcpy(existing_device->phys_connector, new_device->phys_connector,
- sizeof(existing_device->phys_connector));
- memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
- kfree(existing_device->raid_map);
- existing_device->raid_map = new_device->raid_map;
- existing_device->raid_bypass_configured =
- new_device->raid_bypass_configured;
- existing_device->raid_bypass_enabled =
- new_device->raid_bypass_enabled;
existing_device->device_offline = false;
- /* To prevent this from being freed later. */
- new_device->raid_map = NULL;
+ if (pqi_is_logical_device(existing_device)) {
+ existing_device->is_external_raid_device = new_device->is_external_raid_device;
+
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+ if (ctrl_info->logical_volume_rescan_needed)
+ existing_device->rescan = true;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+ existing_device->raid_map = new_device->raid_map;
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+ }
+ existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
+ }
+ } else {
+ existing_device->aio_enabled = new_device->aio_enabled;
+ existing_device->aio_handle = new_device->aio_handle;
+ existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
+ existing_device->active_path_index = new_device->active_path_index;
+ existing_device->phy_id = new_device->phy_id;
+ existing_device->path_map = new_device->path_map;
+ existing_device->bay = new_device->bay;
+ existing_device->box_index = new_device->box_index;
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+
+ existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
+ if (existing_device->multi_lun_device_lun_count == 0)
+ existing_device->multi_lun_device_lun_count = 1;
+ }
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
@@ -2505,23 +2542,6 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
- struct pqi_scsi_dev *next;
-
- list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- list_del(&device->scsi_device_list_entry);
- pqi_free_device(device);
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- }
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -3322,6 +3342,9 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
+ case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ rc = -ENODEV;
+ break;
default:
rc = -EIO;
break;
@@ -3663,6 +3686,20 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
return ack_event;
}
+static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->raid_bypass_enabled)
+ device->raid_bypass_enabled = false;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
@@ -3690,6 +3727,8 @@ static void pqi_event_worker(struct work_struct *work)
rescan_needed = true;
if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
ctrl_info->logical_volume_rescan_needed = true;
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
}
if (ack_event)
pqi_acknowledge_event(ctrl_info, event);
@@ -3697,8 +3736,11 @@ static void pqi_event_worker(struct work_struct *work)
event++;
}
+#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
+
if (rescan_needed)
- pqi_schedule_rescan_worker_delayed(ctrl_info);
+ pqi_schedule_rescan_worker_with_delay(ctrl_info,
+ PQI_RESCAN_WORK_FOR_EVENT_DELAY);
out:
pqi_ctrl_unbusy(ctrl_info);
@@ -3992,10 +4034,14 @@ static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
int num_vectors_enabled;
+ unsigned int flags = PCI_IRQ_MSIX;
+
+ if (!pqi_disable_managed_interrupts)
+ flags |= PCI_IRQ_AFFINITY;
num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ flags);
if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
"MSI-X init failed with error %d\n",
@@ -5457,6 +5503,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
+ request->ml_device_lun_number = (u8)scmd->device->lun;
cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -5484,10 +5531,10 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
@@ -5621,7 +5668,9 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
@@ -5637,6 +5686,8 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5846,7 +5897,7 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
return;
}
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
}
static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
@@ -5941,7 +5992,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding);
+ atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
ctrl_info = shost_to_hba(shost);
@@ -5987,7 +6038,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
out:
if (rc)
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
return rc;
}
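smartpqi's accounting moves from one outstanding-command counter per device to one per LUN, so the pending-I/O waits and LUN resets above and below only consider traffic for the LUN in question. A kernel-style sketch of the per-LUN pattern (struct and helper names are illustrative; PQI_MAX_LUNS_PER_DEVICE is from the header hunk above):

struct lun_counts {
	atomic_t outstanding[PQI_MAX_LUNS_PER_DEVICE];
};

static void io_started(struct lun_counts *c, u8 lun)
{
	atomic_inc(&c->outstanding[lun]);
}

static void io_completed(struct lun_counts *c, u8 lun)
{
	atomic_dec(&c->outstanding[lun]);
}

static bool lun_idle(struct lun_counts *c, u8 lun)
{
	return atomic_read(&c->outstanding[lun]) == 0;
}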
@@ -6127,7 +6178,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs)
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
{
int cmds_outstanding;
unsigned long start_jiffies;
@@ -6137,23 +6188,25 @@ static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
start_jiffies = jiffies;
warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
- while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
+ while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
+ if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ }
msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
if (msecs_waiting >= timeout_msecs) {
dev_err(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
+ lun, msecs_waiting / 1000, cmds_outstanding);
return -ETIMEDOUT;
}
if (time_after(jiffies, warning_timeout)) {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
+ lun, msecs_waiting / 1000, cmds_outstanding);
warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
@@ -6173,7 +6226,7 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct completion *wait)
+ struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
{
int rc;
unsigned int wait_secs;
@@ -6195,10 +6248,10 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
}
wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
- cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
+ cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
- ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
}
return rc;
@@ -6206,13 +6259,15 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6226,6 +6281,8 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
put_unaligned_le16(io_request->index, &request->request_id);
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ request->ml_device_lun_number = (u8)scmd->device->lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
@@ -6233,7 +6290,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -6247,16 +6304,18 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
for (retries = 0;;) {
- reset_rc = pqi_lun_reset(ctrl_info, device);
- if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, scmd);
+ if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -6264,18 +6323,19 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pq
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;
return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_io_queued_for_device(ctrl_info, device);
@@ -6283,7 +6343,7 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
if (rc)
rc = FAILED;
else
- rc = pqi_lun_reset_with_retries(ctrl_info, device);
+ rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
pqi_ctrl_unblock_requests(ctrl_info);
return rc;
@@ -6305,18 +6365,18 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
shost->host_no,
- device->bus, device->target, device->lun,
+ device->bus, device->target, (u32)scmd->device->lun,
scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
- rc = pqi_device_reset(ctrl_info, device);
+ rc = pqi_device_reset(ctrl_info, scmd);
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, device->lun,
+ shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
@@ -6405,6 +6465,35 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return rc;
}
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ int mutex_acquired;
+ unsigned long flags;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+ if (!mutex_acquired)
+ return;
+
+ device = sdev->hostdata;
+ if (!device) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+}
+
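
pqi_slave_destroy() above takes scan_mutex with mutex_trylock() and simply returns on failure; a hedged reading of the intent is that a concurrent rescan already holds the mutex and owns device cleanup in that case, so blocking here would risk deadlock. The same shape in plain pthreads, with hypothetical device_state/remove_device() stand-ins:

#include <pthread.h>

struct device_state;				/* hypothetical */
void remove_device(struct device_state *dev);	/* hypothetical */

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;

static void destroy_path(struct device_state *dev)
{
	if (pthread_mutex_trylock(&scan_mutex) != 0)
		return;		/* scanner is active and will clean up */
	remove_device(dev);
	pthread_mutex_unlock(&scan_mutex);
}
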
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
@@ -6919,6 +7008,9 @@ static ssize_t pqi_unique_id_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6955,6 +7047,9 @@ static ssize_t pqi_lunid_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6990,6 +7085,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7067,6 +7165,9 @@ static ssize_t pqi_sas_address_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7093,6 +7194,9 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7122,6 +7226,9 @@ static ssize_t pqi_raid_level_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7152,6 +7259,9 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7179,6 +7289,9 @@ static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7268,6 +7381,7 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
@@ -7290,6 +7404,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->this_id = -1;
shost->max_channel = PQI_MAX_BUS;
shost->max_cmd_len = MAX_COMMAND_SIZE;
- shost->max_lun = ~0;
+ shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
shost->max_id = ~0;
shost->max_sectors = ctrl_info->max_sectors;
@@ -7358,8 +7473,7 @@ static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
reset_reg.all_bits = readl(&pqi_registers->device_reset);
if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (!sis_is_firmware_running(ctrl_info)) {
rc = -ENXIO;
break;
}
@@ -7463,6 +7577,9 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
sizeof(identify->vendor_id));
ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Firmware version: %s\n", ctrl_info->firmware_version);
+
out:
kfree(identify);
@@ -7634,6 +7751,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
+ ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7734,6 +7854,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Multi-LUN Target",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -7835,6 +7960,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->tmf_iu_timeout_supported = false;
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
+ ctrl_info->multi_lun_device_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8491,6 +8617,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
ctrl_info->max_write_raid_1_10_2drive = ~0;
ctrl_info->max_write_raid_1_10_3drive = ~0;
+ ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
return ctrl_info;
}
@@ -8508,7 +8635,6 @@ static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
- pqi_stop_heartbeat_timer(ctrl_info);
pqi_free_interrupts(ctrl_info);
if (ctrl_info->queue_memory_base)
dma_free_coherent(&ctrl_info->pci_dev->dev,
@@ -8533,9 +8659,15 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
+ ctrl_info->controller_online = false;
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ }
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8875,11 +9007,18 @@ error:
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
+ u16 vendor_id;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
+ pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
+ if (vendor_id == 0xffff)
+ ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
+ else
+ ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
+
pqi_remove_ctrl(ctrl_info);
}
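
The 0xffff test above is the conventional surprise-removal probe: PCI config reads addressed to a device that is no longer present return all ones. The generic form of the check, assuming only a valid struct pci_dev:

static bool pci_device_is_gone(struct pci_dev *pdev)
{
	u16 vendor;

	/* reads to a missing device float high, returning all ones */
	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	return vendor == 0xffff;
}
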
@@ -8956,9 +9095,31 @@ static void pqi_process_lockup_action_param(void)
DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
+
+static void pqi_process_ctrl_ready_timeout_param(void)
+{
+ if (pqi_ctrl_ready_timeout_secs == 0)
+ return;
+
+ if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
+ } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
+ }
+
+ sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
+}
+
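
The helper above bounds the ctrl_ready_timeout module parameter to [30, 1800] seconds, warning separately for each direction before forwarding the value to the SIS layer. If the per-direction messages were not needed, a clamp_t() one-liner would express the same bound; a hypothetical alternative, not the driver's code:

static void pqi_clamp_ctrl_ready_timeout(void)
{
	if (pqi_ctrl_ready_timeout_secs == 0)
		return;		/* 0 selects the built-in default */

	pqi_ctrl_ready_timeout_secs = clamp_t(unsigned int,
		pqi_ctrl_ready_timeout_secs,
		PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS,
		PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
	sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
}
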
static void pqi_process_module_params(void)
{
pqi_process_lockup_action_param();
+ pqi_process_ctrl_ready_timeout_param();
}
#if defined(CONFIG_PM)
@@ -9275,6 +9436,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0659)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
@@ -9739,6 +9904,46 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0101)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0201)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0220)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0221)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0520)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0522)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0620)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0621)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0622)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0623)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index dea4ebaf1677..13e8c539010e 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index afc27adf68e9..5811fb3c22a9 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -86,6 +86,8 @@ struct sis_base_struct {
#pragma pack()
+unsigned int sis_ctrl_ready_timeout_secs = SIS_CTRL_READY_TIMEOUT_SECS;
+
static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
unsigned int timeout_secs)
{
@@ -122,7 +124,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
{
return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
- SIS_CTRL_READY_TIMEOUT_SECS);
+ sis_ctrl_ready_timeout_secs);
}
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
@@ -138,7 +140,7 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
status = readl(&ctrl_info->registers->sis_firmware_status);
- if (status & SIS_CTRL_KERNEL_PANIC)
+ if (status != ~0 && (status & SIS_CTRL_KERNEL_PANIC))
running = false;
else
running = true;
@@ -194,6 +196,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
/* Disable doorbell interrupts by masking all interrupts. */
writel(~0, &registers->sis_interrupt_mask);
+ usleep_range(1000, 2000);
/*
* Force the completion of the interrupt mask register write before
@@ -383,6 +386,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit)
{
writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ usleep_range(1000, 2000);
return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit);
}
@@ -423,6 +427,7 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
{
writel(value, &ctrl_info->registers->sis_driver_scratch);
+ usleep_range(1000, 2000);
}
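
This file now repeats usleep_range(1000, 2000) after each interrupt-mask, doorbell, and scratch-register write; the pattern reads as giving the controller firmware a short window to observe the MMIO write before the driver proceeds. A wrapper would state that once; a sketch, not the driver's code:

static inline void sis_write_and_settle(u32 value, void __iomem *reg)
{
	writel(value, reg);
	usleep_range(1000, 2000);	/* let firmware observe the write */
}
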
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 5f3575261a8e..9dcbae96a5c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -32,4 +32,6 @@ void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+extern unsigned int sis_ctrl_ready_timeout_secs;
+
#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index e6b3e8b431c0..2550ba964b03 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -145,7 +145,7 @@ struct snic_exch_ver_req {
* HBA Capabilities
* Bit 1: Reserved.
* Bit 2: Dynamic Discovery of LUNs.
- * Bit 3: Async event notifications on on tgt online/offline events.
+ * Bit 3: Async event notifications on tgt online/offline events.
* Bit 4: IO timeout support in FW.
* Bit 5-31: Reserved.
*/
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fe000da11332..8ced292c4b96 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -2012,7 +2012,7 @@ static int storvsc_probe(struct hv_device *device,
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq) {
ret = -ENOMEM;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 255a2d48d421..f0db17e34ea0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3598,7 +3598,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num)
}
/*
- * Gerard's alchemy:) that deals with with the data
+ * Gerard's alchemy:) that deals with the data
* pointer for both MDP and the residual calculation.
*
* I didn't want to bloat the code by more than 200
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 28a8c0dda66c..a0ceeede450f 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/slab.h>
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index 32c346b72635..dff6d5ef4e46 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -107,30 +107,47 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev,
{
u32 hw_version;
int err;
-
- err = devm_pm_opp_set_clkname(dev, NULL);
- if (err) {
- dev_err(dev, "failed to set OPP clk: %d\n", err);
- return err;
- }
-
- /* Tegra114+ doesn't support OPP yet */
- if (!of_machine_is_compatible("nvidia,tegra20") &&
- !of_machine_is_compatible("nvidia,tegra30"))
- return -ENODEV;
-
- if (of_machine_is_compatible("nvidia,tegra20"))
+ /*
+ * The clk's connection id to set is NULL and this is a NULL terminated
+ * array, hence two NULL entries.
+ */
+ const char *clk_names[] = { NULL, NULL };
+ struct dev_pm_opp_config config = {
+ /*
+ * For some devices we don't have any OPP table in the DT, and
+ * in order to use the same code path for all the devices, we
+ * create a dummy OPP table for them via this. The dummy OPP
+ * table is only capable of doing clk_set_rate() on invocation
+ * of dev_pm_opp_set_rate() and doesn't provide any other
+ * functionality.
+ */
+ .clk_names = clk_names,
+ };
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
hw_version = BIT(tegra_sku_info.soc_process_id);
- else
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ } else if (of_machine_is_compatible("nvidia,tegra30")) {
hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ }
- err = devm_pm_opp_set_supported_hw(dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(dev, &config);
if (err) {
- dev_err(dev, "failed to set OPP supported HW: %d\n", err);
+ dev_err(dev, "failed to set OPP config: %d\n", err);
return err;
}
/*
+ * Tegra114+ doesn't support OPP yet, return early for non tegra20/30
+ * case.
+ */
+ if (!config.supported_hw)
+ return -ENODEV;
+
+ /*
* Older device-trees have an empty OPP table, we will get
* -ENODEV from devm_pm_opp_of_add_table() in this case.
*/
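
The hunk above migrates from the separate devm_pm_opp_set_clkname()/devm_pm_opp_set_supported_hw() setters to a single devm_pm_opp_set_config() call. A condensed sketch of the consolidated form, for a driver with one unnamed clock and one supported-hw word (the BIT(2) value is hypothetical):

static int demo_init_opp(struct device *dev)
{
	/* one NULL connection id plus the array terminator */
	static const char * const clk_names[] = { NULL, NULL };
	u32 hw_version = BIT(2);	/* hypothetical speedo/process bit */
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,
		.supported_hw = &hw_version,
		.supported_hw_count = 1,
	};

	return devm_pm_opp_set_config(dev, &config);
}
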
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 5611d14d3ba2..6a4b8f7e7948 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1384,7 +1384,7 @@ tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct generic_pm_domain *genpd;
- const char *rname = "core";
+ const char *rname[] = { "core", NULL };
int err;
genpd = devm_kzalloc(pmc->dev, sizeof(*genpd), GFP_KERNEL);
@@ -1395,7 +1395,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
- err = devm_pm_opp_set_regulators(pmc->dev, &rname, 1);
+ err = devm_pm_opp_set_regulators(pmc->dev, rname);
if (err)
return dev_err_probe(pmc->dev, err,
"failed to set core OPP regulator\n");
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 95ce292994cc..89d1d0d021fc 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1004,9 +1004,18 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_cdns_dma_data *dma;
int ret = 0;
+ /*
+ * The .trigger callback is used to send required IPC to audio
+ * firmware. The .free_stream callback will still be called
+ * by intel_free_stream() in the TRIGGER_SUSPEND case.
+ */
+ if (res->ops && res->ops->trigger)
+ res->ops->trigger(dai, cmd, substream->stream);
+
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma) {
dev_err(dai->dev, "failed to get dma data in %s\n",
@@ -1114,9 +1123,10 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
};
static const struct snd_soc_component_driver dai_component = {
- .name = "soundwire",
- .probe = intel_component_probe,
- .suspend = intel_component_dais_suspend
+ .name = "soundwire",
+ .probe = intel_component_probe,
+ .suspend = intel_component_dais_suspend,
+ .legacy_dai_naming = 1,
};
static int intel_create_dai(struct sdw_cdns *cdns,
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 0bc7daa7afc8..e4cb52e1fe26 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -156,6 +156,7 @@ struct meson_spicc_device {
void __iomem *base;
struct clk *core;
struct clk *pclk;
+ struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
@@ -168,6 +169,8 @@ struct meson_spicc_device {
unsigned long xfer_remain;
};
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
- u32 conf = 0;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
- /* Default Clock rate core/4 */
-
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
return 0;
}
@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
*/
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
- struct clk_divider *pow2_div, *enh_div;
- struct clk_mux *mux;
+ struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
- if (!pow2_div)
- return -ENOMEM;
-
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- pow2_div->shift = 16,
- pow2_div->width = 3,
- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
- pow2_div->reg = spicc->base + SPICC_CONREG;
- pow2_div->hw.init = &init;
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
- clk = devm_clk_register(dev, &pow2_div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
- if (!spicc->data->has_enhance_clk_div) {
- spicc->clk = clk;
- return 0;
- }
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
- parent_data[0].hw = &pow2_div->hw;
+ parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
meson_spicc_oen_enable(spicc);
- ret = meson_spicc_clk_init(spicc);
+ ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
- dev_err(&pdev->dev, "clock registration failed\n");
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 3ebdce804b90..bc5e36fd4288 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -437,7 +437,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
- ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8f97a3eacdea..83da8862b8f2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -95,7 +95,7 @@ static ssize_t driver_override_show(struct device *dev,
}
static DEVICE_ATTR_RW(driver_override);
-static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev)
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
struct spi_statistics __percpu *pcpu_stats;
@@ -162,7 +162,7 @@ static struct device_attribute dev_attr_spi_device_##field = { \
}
#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
ssize_t len; \
@@ -309,7 +309,7 @@ static const struct attribute_group *spi_master_groups[] = {
NULL,
};
-static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats,
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
struct spi_transfer *xfer,
struct spi_controller *ctlr)
{
@@ -1275,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
@@ -1432,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
spi_set_cs(msg->spi, true, false);
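
The __percpu annotations in this hunk document the SPI core's statistics layout for sparse: each CPU increments its own counter copy without locking, and readers sum over all CPUs. A minimal sketch of that pattern with a hypothetical demo struct (the real code additionally uses u64_stats_sync to avoid 64-bit read tearing on 32-bit hosts):

struct demo_stats {
	u64 transfers;
};

static struct demo_stats __percpu *demo_stats;

static int demo_stats_init(void)
{
	demo_stats = alloc_percpu(struct demo_stats);
	return demo_stats ? 0 : -ENOMEM;
}

static void demo_count_transfer(void)
{
	this_cpu_inc(demo_stats->transfers);	/* lock-free, this CPU only */
}

static u64 demo_total_transfers(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(demo_stats, cpu)->transfers;
	return sum;
}
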
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index e368f038ff5c..baf4da7bb3b4 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1004,8 +1004,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
int data_direction, payload_length;
+ struct iscsi_ecdb_ahdr *ecdb_ahdr;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
+ unsigned char *cdb;
int sam_task_attr;
atomic_long_inc(&conn->sess->cmd_pdus);
@@ -1106,6 +1108,27 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
+ cdb = hdr->cdb;
+
+ if (hdr->hlength) {
+ ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
+ if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
+ pr_err("Additional Header Segment type %d not supported!\n",
+ ecdb_ahdr->ahstype);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
+ }
+
+ cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
+ GFP_KERNEL);
+ if (cdb == NULL)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
+ memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
+ be16_to_cpu(ecdb_ahdr->ahslength) - 1);
+ }
+
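
The allocation size above follows from the AHS layout: ahslength counts one reserved byte plus the CDB bytes beyond the first 16, so the full CDB is ISCSI_CDB_SIZE + ahslength - 1 bytes, which is exactly the kmalloc(ahslength + 15) above. As a worked check, a 32-byte extended CDB arrives with ahslength = 17:

/* sketch of the size arithmetic, not driver code */
static inline size_t iscsi_total_cdb_len(u16 ahslength)
{
	return ISCSI_CDB_SIZE + ahslength - 1;	/* 17 -> 16 + 16 = 32 */
}
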
data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
(hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
DMA_NONE;
@@ -1153,9 +1176,12 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr;
dr = iscsit_allocate_datain_req();
- if (!dr)
+ if (!dr) {
+ if (cdb != hdr->cdb)
+ kfree(cdb);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
iscsit_attach_datain_req(cmd, dr);
}
@@ -1176,9 +1202,12 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
target_get_sess_cmd(&cmd->se_cmd, true);
cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
- cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb,
+ cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
GFP_KERNEL);
+ if (cdb != hdr->cdb)
+ kfree(cdb);
+
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_cmd(cmd,
@@ -4036,8 +4065,9 @@ static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
{
int ret;
- u8 *buffer, opcode;
+ u8 *buffer, *tmp_buf, opcode;
u32 checksum = 0, digest = 0;
+ struct iscsi_hdr *hdr;
struct kvec iov;
buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
@@ -4062,6 +4092,25 @@ static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
break;
}
+ hdr = (struct iscsi_hdr *) buffer;
+ if (hdr->hlength) {
+ iov.iov_len = hdr->hlength * 4;
+ tmp_buf = krealloc(buffer,
+ ISCSI_HDR_LEN + iov.iov_len,
+ GFP_KERNEL);
+ if (!tmp_buf)
+ break;
+
+ buffer = tmp_buf;
+ iov.iov_base = &buffer[ISCSI_HDR_LEN];
+
+ ret = rx_data(conn, &iov, 1, iov.iov_len);
+ if (ret != iov.iov_len) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ break;
+ }
+ }
+
if (conn->conn_ops->HeaderDigest) {
iov.iov_base = &digest;
iov.iov_len = ISCSI_CRC_LEN;
@@ -4361,7 +4410,7 @@ int iscsit_close_connection(
spin_lock_bh(&sess->conn_lock);
atomic_dec(&sess->nconn);
- pr_debug("Decremented iSCSI connection count to %hu from node:"
+ pr_debug("Decremented iSCSI connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
/*
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 6e5611d8f51b..c8a248bd11be 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -205,6 +205,38 @@ static struct iscsi_chap *chap_server_open(
return chap;
}
+static const char base64_lookup_table[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+static int chap_base64_decode(u8 *dst, const char *src, size_t len)
+{
+ int i, bits = 0, ac = 0;
+ const char *p;
+ u8 *cp = dst;
+
+ for (i = 0; i < len; i++) {
+ if (src[i] == '=')
+ return cp - dst;
+
+ p = strchr(base64_lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+
+ ac <<= 6;
+ ac += (p - base64_lookup_table);
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = (ac >> (bits - 8)) & 0xff;
+ ac &= ~(BIT(16) - BIT(bits - 8));
+ bits -= 8;
+ }
+ }
+ if (ac)
+ return -1;
+
+ return cp - dst;
+}
+
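
A usage sketch for the decoder above, with a caller-provided buffer assumed large enough: it returns the decoded byte count, stopping at '=' padding, and reports -2 for characters outside the alphabet and -1 for leftover bits:

static void chap_base64_demo(void)
{
	u8 buf[8];
	int n;

	n = chap_base64_decode(buf, "aGk=", 4);	/* n == 2, buf = 'h','i' */
}
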
static int chap_server_compute_hash(
struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
@@ -295,16 +327,27 @@ static int chap_server_compute_hash(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (type != HEX) {
- pr_err("Could not find CHAP_R.\n");
- goto out;
- }
- if (strlen(chap_r) != chap->digest_size * 2) {
- pr_err("Malformed CHAP_R\n");
- goto out;
- }
- if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
- pr_err("Malformed CHAP_R\n");
+
+ switch (type) {
+ case HEX:
+ if (strlen(chap_r) != chap->digest_size * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
+ pr_err("Malformed CHAP_R: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ if (chap_base64_decode(client_digest, chap_r, strlen(chap_r)) !=
+ chap->digest_size) {
+ pr_err("Malformed CHAP_R: invalid BASE64\n");
+ goto out;
+ }
+ break;
+ default:
+ pr_err("Could not find CHAP_R\n");
goto out;
}
@@ -373,7 +416,13 @@ static int chap_server_compute_hash(
/*
* Get CHAP_I.
*/
- if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+ ret = extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type);
+ if (ret == -ENOENT) {
+ pr_debug("Could not find CHAP_I. Initiator uses One way authentication.\n");
+ auth_ret = 0;
+ goto out;
+ }
+ if (ret < 0) {
pr_err("Could not find CHAP_I.\n");
goto out;
}
@@ -404,23 +453,46 @@ static int chap_server_compute_hash(
goto out;
}
- if (type != HEX) {
+ switch (type) {
+ case HEX:
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+
+ if (hex2bin(initiatorchg_binhex, initiatorchg,
+ initiatorchg_len) < 0) {
+ pr_err("Malformed CHAP_C: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ initiatorchg_len = chap_base64_decode(initiatorchg_binhex,
+ initiatorchg,
+ strlen(initiatorchg));
+ if (initiatorchg_len < 0) {
+ pr_err("Malformed CHAP_C: invalid BASE64\n");
+ goto out;
+ }
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+ break;
+ default:
pr_err("Could not find CHAP_C.\n");
goto out;
}
- initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
- if (!initiatorchg_len) {
- pr_err("Unable to convert incoming challenge\n");
- goto out;
- }
- if (initiatorchg_len > 1024) {
- pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
- goto out;
- }
- if (hex2bin(initiatorchg_binhex, initiatorchg, initiatorchg_len) < 0) {
- pr_err("Malformed CHAP_C\n");
- goto out;
- }
+
pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index ce14540ba650..5d0f51822414 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -210,7 +210,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
return ERR_PTR(ret);
}
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tpg = to_iscsi_tpg(se_tpg);
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return ERR_PTR(-EINVAL);
@@ -281,9 +281,7 @@ static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
char *page) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
- \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
return sprintf(page, "%u\n", nacl->node_attrib.name); \
} \
\
@@ -291,8 +289,7 @@ static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
u32 val; \
int ret; \
\
@@ -317,6 +314,36 @@ ISCSI_NACL_ATTR(random_datain_pdu_offsets);
ISCSI_NACL_ATTR(random_datain_seq_offsets);
ISCSI_NACL_ATTR(random_r2t_offsets);
+static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item,
+ char *page)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+
+ return sprintf(page, "%d\n", nacl->node_attrib.authentication);
+}
+
+static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+ s32 val;
+ int ret;
+
+ ret = kstrtos32(page, 0, &val);
+ if (ret)
+ return ret;
+ if (val != 0 && val != 1 && val != NA_AUTHENTICATION_INHERITED)
+ return -EINVAL;
+
+ nacl->node_attrib.authentication = val;
+
+ return count;
+}
+
+CONFIGFS_ATTR(iscsi_nacl_attrib_, authentication);
+
static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_attr_dataout_timeout,
&iscsi_nacl_attrib_attr_dataout_timeout_retries,
@@ -326,6 +353,7 @@ static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_attr_random_datain_pdu_offsets,
&iscsi_nacl_attrib_attr_random_datain_seq_offsets,
&iscsi_nacl_attrib_attr_random_r2t_offsets,
+ &iscsi_nacl_attrib_attr_authentication,
NULL,
};
@@ -377,15 +405,14 @@ static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_show(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
static ssize_t iscsi_nacl_auth_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_store(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page, count); \
+ return __iscsi_nacl_auth_##name##_store(to_iscsi_nacl(nacl), \
+ page, count); \
} \
\
CONFIGFS_ATTR(iscsi_nacl_auth_, name)
@@ -417,8 +444,7 @@ static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_show(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
\
CONFIGFS_ATTR_RO(iscsi_nacl_auth_, name)
@@ -623,8 +649,7 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
u32 cmdsn_depth = 0;
int ret;
@@ -700,8 +725,7 @@ static struct configfs_attribute *lio_target_initiator_attrs[] = {
static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
const char *name)
{
- struct iscsi_node_acl *acl =
- container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
@@ -720,8 +744,7 @@ static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
@@ -736,8 +759,7 @@ static ssize_t iscsi_tpg_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
u32 val; \
int ret; \
\
@@ -800,8 +822,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -813,8 +834,7 @@ static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg,
static ssize_t __iscsi_##prefix##_##name##_store(struct se_portal_group *se_tpg,\
const char *page, size_t count) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -861,8 +881,7 @@ DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -900,8 +919,7 @@ static ssize_t iscsi_tpg_param_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_param *param; \
ssize_t rb; \
\
@@ -923,8 +941,7 @@ static ssize_t iscsi_tpg_param_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
char *buf; \
int ret, len; \
\
@@ -1073,8 +1090,7 @@ free_out:
static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg,
bool enable)
{
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
int ret;
ret = iscsit_get_tpg(tpg);
@@ -1106,7 +1122,7 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tpg = to_iscsi_tpg(se_tpg);
tiqn = tpg->tpg_tiqn;
/*
* iscsit_tpg_del_portal_group() assumes force=1
@@ -1416,46 +1432,41 @@ static void lio_aborted_task(struct se_cmd *se_cmd)
cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
}
-static inline struct iscsi_portal_group *iscsi_tpg(struct se_portal_group *se_tpg)
-{
- return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
-}
-
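
The file-local iscsi_tpg() helper removed here, along with the open-coded container_of() calls throughout this file, is replaced by shared to_iscsi_tpg()/to_iscsi_nacl() accessors, presumably defined in the core header as the equivalent wrappers:

#define to_iscsi_tpg(se_tpg) \
	container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg)
#define to_iscsi_nacl(se_nacl) \
	container_of(se_nacl, struct iscsi_node_acl, se_node_acl)
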
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
}
static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpgt;
+ return to_iscsi_tpg(se_tpg)->tpgt;
}
static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
}
static int lio_tpg_check_prot_fabric_only(
@@ -1465,9 +1476,9 @@ static int lio_tpg_check_prot_fabric_only(
* Only report fabric_prot_type if t10_pi has also been enabled
* for incoming ib_isert sessions.
*/
- if (!iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
+ if (!to_iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
return 0;
- return iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
}
/*
@@ -1504,16 +1515,14 @@ static void lio_tpg_close_session(struct se_session *se_sess)
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
}
static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
- struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_acl);
struct se_portal_group *se_tpg = se_acl->se_tpg;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
acl->node_attrib.nacl = acl;
iscsit_set_default_node_attribues(acl, tpg);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 6b94eecc4790..27e448c2d066 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -341,6 +341,7 @@ static int iscsi_login_zero_tsih_s2(
{
struct iscsi_node_attrib *na;
struct iscsit_session *sess = conn->sess;
+ struct iscsi_param *param;
bool iser = false;
sess->tpg = conn->tpg;
@@ -375,6 +376,18 @@ static int iscsi_login_zero_tsih_s2(
na = iscsit_tpg_get_node_attrib(sess);
/*
+ * If the ACL allows non-authorized access to a TPG that uses CHAP,
+ * then set AuthMethod to None.
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (param && !strstr(param->value, NONE)) {
+ if (!iscsi_conn_auth_required(conn))
+ if (iscsi_change_param_sprintf(conn, "AuthMethod=%s",
+ NONE))
+ return -1;
+ }
+
+ /*
* Need to send TargetPortalGroupTag back in first login response
* on any iSCSI connection where the Initiator provides TargetName.
* See 5.3.1. Login Phase Start
@@ -715,7 +728,7 @@ void iscsi_post_login_handler(
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu"
+ pr_debug("Incremented iSCSI Connection count to %d"
" from node: %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
@@ -763,7 +776,7 @@ void iscsi_post_login_handler(
spin_lock_bh(&sess->conn_lock);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu from node:"
+ pr_debug("Incremented iSCSI Connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index b34ac9ecac31..f2919319ad38 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -62,31 +62,34 @@ int extract_param(
int len;
if (!in_buf || !pattern || !out_buf || !type)
- return -1;
+ return -EINVAL;
ptr = strstr(in_buf, pattern);
if (!ptr)
- return -1;
+ return -ENOENT;
ptr = strstr(ptr, "=");
if (!ptr)
- return -1;
+ return -EINVAL;
ptr += 1;
if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
ptr += 2; /* skip 0x */
*type = HEX;
+ } else if (*ptr == '0' && (*(ptr+1) == 'b' || *(ptr+1) == 'B')) {
+ ptr += 2; /* skip 0b */
+ *type = BASE64;
} else
*type = DECIMAL;
len = strlen_semi(ptr);
if (len < 0)
- return -1;
+ return -EINVAL;
if (len >= max_length) {
pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
- return -1;
+ return -EINVAL;
}
memcpy(out_buf, ptr, len);
out_buf[len] = '\0';
@@ -94,6 +97,31 @@ int extract_param(
return 0;
}
+static struct iscsi_node_auth *iscsi_get_node_auth(struct iscsit_conn *conn)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType)
+ return &iscsit_global->discovery_acl.node_auth;
+
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate struct se_node_acl for CHAP auth\n");
+ return NULL;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ tpg = to_iscsi_tpg(se_nacl->se_tpg);
+ return &tpg->tpg_demo_auth;
+ }
+
+ nacl = to_iscsi_nacl(se_nacl);
+
+ return &nacl->node_auth;
+}
+
static u32 iscsi_handle_authentication(
struct iscsit_conn *conn,
char *in_buf,
@@ -102,40 +130,11 @@ static u32 iscsi_handle_authentication(
int *out_length,
unsigned char *authtype)
{
- struct iscsit_session *sess = conn->sess;
struct iscsi_node_auth *auth;
- struct iscsi_node_acl *iscsi_nacl;
- struct iscsi_portal_group *iscsi_tpg;
- struct se_node_acl *se_nacl;
-
- if (!sess->sess_ops->SessionType) {
- /*
- * For SessionType=Normal
- */
- se_nacl = conn->sess->se_sess->se_node_acl;
- if (!se_nacl) {
- pr_err("Unable to locate struct se_node_acl for"
- " CHAP auth\n");
- return -1;
- }
-
- if (se_nacl->dynamic_node_acl) {
- iscsi_tpg = container_of(se_nacl->se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
-
- auth = &iscsi_tpg->tpg_demo_auth;
- } else {
- iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
- auth = &iscsi_nacl->node_auth;
- }
- } else {
- /*
- * For SessionType=Discovery
- */
- auth = &iscsit_global->discovery_acl.node_auth;
- }
+ auth = iscsi_get_node_auth(conn);
+ if (!auth)
+ return -1;
if (strstr("CHAP", authtype))
strcpy(conn->sess->auth_type, "CHAP");
@@ -815,6 +814,42 @@ static int iscsi_target_do_authentication(
return 0;
}
+bool iscsi_conn_auth_required(struct iscsit_conn *conn)
+{
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType) {
+ /*
+ * For SessionType=Discovery
+ */
+ return conn->tpg->tpg_attrib.authentication;
+ }
+ /*
+ * For SessionType=Normal
+ */
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_debug("Unknown ACL is trying to connect\n");
+ return true;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ pr_debug("Dynamic ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+ return conn->tpg->tpg_attrib.authentication;
+ }
+
+ pr_debug("Known ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+
+ nacl = to_iscsi_nacl(se_nacl);
+ if (nacl->node_attrib.authentication == NA_AUTHENTICATION_INHERITED)
+ return conn->tpg->tpg_attrib.authentication;
+
+ return nacl->node_attrib.authentication;
+}
+
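
iscsi_conn_auth_required() above resolves the effective authentication requirement from three inputs: session type, ACL kind, and the new per-ACL attribute. A condensed sketch of the same decision, with flags standing in for the session/ACL state the real function dereferences:

static bool demo_auth_required(bool discovery, bool have_acl, bool dynamic,
			       int acl_auth, bool tpg_auth)
{
	if (discovery)
		return tpg_auth;	/* discovery follows the TPG attribute */
	if (!have_acl)
		return true;		/* unknown initiator: always require auth */
	if (dynamic || acl_auth == NA_AUTHENTICATION_INHERITED)
		return tpg_auth;	/* inherit the TPG attribute */
	return acl_auth;		/* explicit per-ACL 0 or 1 */
}
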
static int iscsi_target_handle_csg_zero(
struct iscsit_conn *conn,
struct iscsi_login *login)
@@ -876,22 +911,26 @@ static int iscsi_target_handle_csg_zero(
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
- if (conn->tpg->tpg_attrib.authentication &&
- !strncmp(param->value, NONE, 4)) {
- pr_err("Initiator sent AuthMethod=None but"
- " Target is enforcing iSCSI Authentication,"
- " login failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
- ISCSI_LOGIN_STATUS_AUTH_FAILED);
- return -1;
- }
+ bool auth_required = iscsi_conn_auth_required(conn);
+
+ if (auth_required) {
+ if (!strncmp(param->value, NONE, 4)) {
+ pr_err("Initiator sent AuthMethod=None but"
+ " Target is enforcing iSCSI Authentication,"
+ " login failed.\n");
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
- if (conn->tpg->tpg_attrib.authentication &&
- !login->auth_complete)
- return 0;
+ if (!login->auth_complete)
+ return 0;
- if (strncmp(param->value, NONE, 4) && !login->auth_complete)
- return 0;
+ if (strncmp(param->value, NONE, 4) &&
+ !login->auth_complete)
+ return 0;
+ }
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
@@ -906,6 +945,18 @@ do_auth:
return iscsi_target_do_authentication(conn, login);
}
+static bool iscsi_conn_authenticated(struct iscsit_conn *conn,
+ struct iscsi_login *login)
+{
+ if (!iscsi_conn_auth_required(conn))
+ return true;
+
+ if (login->auth_complete)
+ return true;
+
+ return false;
+}
+
static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_login *login)
{
int ret;
@@ -949,11 +1000,10 @@ static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_lo
return -1;
}
- if (!login->auth_complete &&
- conn->tpg->tpg_attrib.authentication) {
+ if (!iscsi_conn_authenticated(conn, login)) {
pr_err("Initiator is requesting CSG: 1, has not been"
- " successfully authenticated, and the Target is"
- " enforcing iSCSI Authentication, login failed.\n");
+ " successfully authenticated, and the Target is"
+ " enforcing iSCSI Authentication, login failed.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
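
The net effect of the helpers above: iscsi_get_node_auth() picks the credential source (discovery ACL, demo TPG auth, or per-node auth), iscsi_conn_auth_required() gives discovery sessions and dynamic ACLs the portal group's authentication setting while a known ACL may override it per node unless its attribute still holds the NA_AUTHENTICATION_INHERITED sentinel, and iscsi_conn_authenticated() combines that with login->auth_complete. A minimal standalone sketch of the inheritance rule (types and names here are stand-ins, not the kernel structs):

#include <stdbool.h>

enum { AUTH_INHERITED = -1 };		/* mirrors NA_AUTHENTICATION_INHERITED */

struct tpg_attrib  { bool authentication; };	/* portal group default */
struct node_attrib { int authentication; };	/* AUTH_INHERITED, 0 or 1 */

/* The per-node value wins only when it is not the inherited sentinel. */
static bool auth_required(const struct tpg_attrib *tpg,
			  const struct node_attrib *node)
{
	if (!node || node->authentication == AUTH_INHERITED)
		return tpg->authentication;
	return node->authentication;
}
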
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index ed30b9ee75e6..41c3db3ddeaa 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -4,6 +4,7 @@
#define DECIMAL 0
#define HEX 1
+#define BASE64 2
struct iscsit_conn;
struct iscsi_login;
@@ -21,5 +22,5 @@ extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
extern int iscsi_target_start_negotiation(
struct iscsi_login *, struct iscsit_conn *);
extern void iscsi_target_nego_release(struct iscsit_conn *);
-
+extern bool iscsi_conn_auth_required(struct iscsit_conn *conn);
#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 874cb33c9be0..d63efdefb18e 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -30,6 +30,7 @@ void iscsit_set_default_node_attribues(
{
struct iscsi_node_attrib *a = &acl->node_attrib;
+ a->authentication = NA_AUTHENTICATION_INHERITED;
a->dataout_timeout = NA_DATAOUT_TIMEOUT;
a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
a->nopin_timeout = NA_NOPIN_TIMEOUT;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4339ee517434..3cac1aafef68 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -394,8 +394,7 @@ struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
{
struct se_session *se_sess = sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
- struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
return &acl->node_attrib;
}
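
to_iscsi_nacl() and to_iscsi_tpg() replace the open-coded container_of() calls scattered through the hunks above. The pattern is the usual typed downcast from an embedded member to its outer structure; a self-contained sketch, using the textbook container_of definition and simplified struct contents:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_node_acl { int initiator_id; };

struct iscsi_node_acl {
	int node_auth;
	struct se_node_acl se_node_acl;	/* embedded base object */
};

/* Typed wrapper: callers stop repeating the struct and member names. */
static inline struct iscsi_node_acl *to_iscsi_nacl(struct se_node_acl *se_nacl)
{
	return container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
}
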
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index b56ef8af66e7..fb91423a4e2e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -385,7 +385,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
/*
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
- * the Target Port in question for the the incoming
+ * the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
rtpi = get_unaligned_be16(ptr + 2);
@@ -934,8 +934,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
- lacl = rcu_dereference_check(se_deve->se_lun_acl,
- lockdep_is_held(&lun->lun_deve_lock));
+ lacl = se_deve->se_lun_acl;
/*
* spc4r37 p.242:
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bbcbbfa72b07..416514c5c7ac 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -732,6 +732,7 @@ static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -744,8 +745,11 @@ static ssize_t emulate_tpu_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
}
da->emulate_tpu = flag;
@@ -758,6 +762,7 @@ static ssize_t emulate_tpws_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -770,8 +775,11 @@ static ssize_t emulate_tpws_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
}
da->emulate_tpws = flag;
@@ -964,6 +972,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -982,10 +991,12 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
* Discard supported is detected iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
- " because max_unmap_block_desc_count is zero\n",
- da->da_dev);
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
+ da->da_dev);
+ return -ENOSYS;
+ }
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 25f33eb25337..b7f16ee8aa0e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -75,7 +75,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
return TCM_WRITE_PROTECTED;
}
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -152,7 +152,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -216,7 +216,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- lun = rcu_dereference(deve->se_lun);
+ lun = deve->se_lun;
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
@@ -243,11 +243,8 @@ void core_free_device_list_for_node(
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
- hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
- core_disable_device_list_for_node(lun, deve, nacl, tpg);
- }
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
}
@@ -334,8 +331,7 @@ int core_enable_device_list_for_node(
mutex_lock(&nacl->lun_entry_mutex);
orig = target_nacl_find_deve(nacl, mapped_lun);
if (orig && orig->se_lun) {
- struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
+ struct se_lun *orig_lun = orig->se_lun;
if (orig_lun != lun) {
pr_err("Existing orig->se_lun doesn't match new lun"
@@ -355,8 +351,8 @@ int core_enable_device_list_for_node(
return -EINVAL;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_del_rcu(&orig->link);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -374,8 +370,8 @@ int core_enable_device_list_for_node(
return 0;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -434,9 +430,6 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
- rcu_assign_pointer(orig->se_lun, NULL);
- rcu_assign_pointer(orig->se_lun_acl, NULL);
-
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
@@ -457,10 +450,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
-
- if (lun != tmp_lun)
+ if (lun != deve->se_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
@@ -960,6 +950,12 @@ int target_configure_device(struct se_device *dev)
ret = dev->transport->configure_device(dev);
if (ret)
goto out_free_index;
+
+ if (dev->transport->configure_unmap &&
+ dev->transport->configure_unmap(dev)) {
+ pr_debug("Discard support available, but disabled by default.\n");
+ }
+
/*
* XXX: there is not much point to have two different values here..
*/
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 6c8d8b051bfd..28aa643be5d5 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -86,6 +86,24 @@ static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
return &fd_dev->dev;
}
+static bool fd_configure_unmap(struct se_device *dev)
+{
+ struct file *file = FD_DEV(dev)->fd_file;
+ struct inode *inode = file->f_mapping->host;
+
+ if (S_ISBLK(inode->i_mode))
+ return target_configure_unmap_from_queue(&dev->dev_attrib,
+ I_BDEV(inode));
+
+ /* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
+ dev->dev_attrib.max_unmap_lba_count = 0x2000;
+ /* Currently hardcoded to 1 in Linux/SCSI code. */
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity = 1;
+ dev->dev_attrib.unmap_granularity_alignment = 0;
+ return true;
+}
+
static int fd_configure_device(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
@@ -149,10 +167,6 @@ static int fd_configure_device(struct se_device *dev)
" block_device blocks: %llu logical_block_size: %d\n",
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
-
- if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
- pr_debug("IFILE: BLOCK Discard support available,"
- " disabled by default\n");
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -170,16 +184,6 @@ static int fd_configure_device(struct se_device *dev)
}
fd_dev->fd_block_size = FD_BLOCKSIZE;
- /*
- * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
- */
- dev->dev_attrib.max_unmap_lba_count = 0x2000;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity = 1;
- dev->dev_attrib.unmap_granularity_alignment = 0;
/*
* Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
@@ -438,10 +442,6 @@ fd_execute_write_same(struct se_cmd *cmd)
unsigned int len = 0, i;
ssize_t ret;
- if (!nolb) {
- target_complete_cmd(cmd, SAM_STAT_GOOD);
- return 0;
- }
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with FILEIO"
" backends not supported\n");
@@ -927,6 +927,7 @@ static const struct target_backend_ops fileio_ops = {
.configure_device = fd_configure_device,
.destroy_device = fd_destroy_device,
.free_device = fd_free_device,
+ .configure_unmap = fd_configure_unmap,
.parse_cdb = fd_parse_cdb,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 30712a12b151..8351c974cee3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -76,6 +76,14 @@ free_dev:
return NULL;
}
+static bool iblock_configure_unmap(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ return target_configure_unmap_from_queue(&dev->dev_attrib,
+ ib_dev->ibd_bd);
+}
+
static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -119,10 +127,6 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
- pr_debug("IBLOCK: BLOCK Discard support available,"
- " disabled by default\n");
-
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -903,6 +907,7 @@ static const struct target_backend_ops iblock_ops = {
.configure_device = iblock_configure_device,
.destroy_device = iblock_destroy_device,
.free_device = iblock_free_device,
+ .configure_unmap = iblock_configure_unmap,
.plug_device = iblock_plug_device,
.unplug_device = iblock_unplug_device,
.parse_cdb = iblock_parse_cdb,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3829b61b56c1..a1d67554709f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -739,8 +739,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (!deve_tmp->se_lun_acl)
continue;
- lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
- lockdep_is_held(&lun_tmp->lun_deve_lock));
+ lacl_tmp = deve_tmp->se_lun_acl;
nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
@@ -784,8 +783,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
- dest_lun = rcu_dereference_check(deve_tmp->se_lun,
- kref_read(&deve_tmp->pr_kref) != 0);
+ dest_lun = deve_tmp->se_lun;
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, dest_lun, deve_tmp,
@@ -1437,34 +1435,26 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl)
+ if (!se_deve->se_lun_acl)
return 0;
- return target_depend_item(&lun_acl->se_lun_group.cg_item);
+ return target_depend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
}
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl) {
+ if (!se_deve->se_lun_acl) {
kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
- target_undepend_item(&lun_acl->se_lun_group.cg_item);
+ target_undepend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
kref_put(&se_deve->pr_kref, target_pr_kref_release);
}
@@ -1751,8 +1741,7 @@ core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
- dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ dest_lun = dest_se_deve->se_lun;
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_lun, dest_se_deve,
@@ -3446,8 +3435,7 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ struct se_lun *dest_lun = dest_se_deve->se_lun;
spin_unlock(&dev->dev_reservation_lock);
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
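
The recurring change through target_core_device.c and target_core_pr.c above is dropping rcu_dereference()/rcu_assign_pointer() on deve->se_lun and deve->se_lun_acl: those fields are now only published and read under lun_entry_mutex (or with a pr_kref held), so plain loads and stores suffice. A toy contrast of the two disciplines, using pthreads as a stand-in for the kernel mutex:

#include <pthread.h>
#include <stddef.h>

struct se_lun;
struct dev_entry { struct se_lun *se_lun; };

static pthread_mutex_t lun_entry_mutex = PTHREAD_MUTEX_INITIALIZER;

/* With RCU, readers needed rcu_dereference() and writers a grace
 * period before freeing. Once every access path holds the mutex,
 * the same field is an ordinary pointer, which is easier to audit. */
static struct se_lun *entry_get_lun(struct dev_entry *deve)
{
	struct se_lun *lun;

	pthread_mutex_lock(&lun_entry_mutex);
	lun = deve->se_lun;		/* plain load under the lock */
	pthread_mutex_unlock(&lun_entry_mutex);
	return lun;
}
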
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index f6132836eb38..1e3216de1e04 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -345,68 +345,6 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
return 0;
}
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
- int *post_ret)
-{
- unsigned char *buf, *addr;
- struct scatterlist *sg;
- unsigned int offset;
- sense_reason_t ret = TCM_NO_SENSE;
- int i, count;
-
- if (!success)
- return 0;
-
- /*
- * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
- *
- * 1) read the specified logical block(s);
- * 2) transfer logical blocks from the data-out buffer;
- * 3) XOR the logical blocks transferred from the data-out buffer with
- * the logical blocks read, storing the resulting XOR data in a buffer;
- * 4) if the DISABLE WRITE bit is set to zero, then write the logical
- * blocks transferred from the data-out buffer; and
- * 5) transfer the resulting XOR data to the data-in buffer.
- */
- buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate xor_callback buf\n");
- return TCM_OUT_OF_RESOURCES;
- }
- /*
- * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
- * into the locally allocated *buf
- */
- sg_copy_to_buffer(cmd->t_data_sg,
- cmd->t_data_nents,
- buf,
- cmd->data_length);
-
- /*
- * Now perform the XOR against the BIDI read memory located at
- * cmd->t_mem_bidi_list
- */
-
- offset = 0;
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
- addr = kmap_atomic(sg_page(sg));
- if (!addr) {
- ret = TCM_OUT_OF_RESOURCES;
- goto out;
- }
-
- for (i = 0; i < sg->length; i++)
- *(addr + sg->offset + i) ^= *(buf + offset + i);
-
- offset += sg->length;
- kunmap_atomic(addr);
- }
-
-out:
- kfree(buf);
- return ret;
-}
-
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
@@ -933,47 +871,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
- case XDWRITEREAD_10:
- if (cmd->data_direction != DMA_TO_DEVICE ||
- !(cmd->se_cmd_flags & SCF_BIDI))
- return TCM_INVALID_CDB_FIELD;
- sectors = transport_get_sectors_10(cdb);
-
- if (sbc_check_dpofua(dev, cmd, cdb))
- return TCM_INVALID_CDB_FIELD;
-
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-
- /*
- * Setup BIDI XOR callback to be run after I/O completion.
- */
- cmd->execute_cmd = sbc_execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- break;
case VARIABLE_LENGTH_CMD:
{
u16 service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
- case XDWRITEREAD_32:
- sectors = transport_get_sectors_32(cdb);
-
- if (sbc_check_dpofua(dev, cmd, cdb))
- return TCM_INVALID_CDB_FIELD;
- /*
- * Use WRITE_32 and READ_32 opcodes for the emulated
- * XDWRITE_READ_32 logic.
- */
- cmd->t_task_lba = transport_lba_64_ext(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-
- /*
- * Setup BIDI XOR callback to be run during after I/O
- * completion.
- */
- cmd->execute_cmd = sbc_execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 62d15bcc3d93..f85ee5b0fd80 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -877,7 +877,6 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -886,9 +885,9 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
@@ -1217,7 +1216,6 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -1226,9 +1224,9 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6bb20aa9c5bc..8713cda0c2fb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
struct se_device *this_dev;
int rc;
- this_lun = rcu_dereference(deve->se_lun);
+ this_lun = deve->se_lun;
this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index f2b1bcefcadd..1175f3a46859 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -326,6 +326,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
void *ret;
int id;
+ if (!access_ok((void __user *)addr, length))
+ return ERR_PTR(-EFAULT);
+
mutex_lock(&teedev->mutex);
id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 0e5cc948373c..e052dae614eb 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -221,7 +221,7 @@ config THERMAL_EMULATION
config THERMAL_MMIO
tristate "Generic Thermal MMIO driver"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
help
This option enables the generic thermal MMIO driver that will use
@@ -496,7 +496,7 @@ config SPRD_THERMAL
config KHADAS_MCU_FAN_THERMAL
tristate "Khadas MCU controller FAN cooling support"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on MFD_KHADAS_MCU
select MFD_CORE
select REGMAP
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 80d4e0676083..365489bf4b8c 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
- if (!priv->data_vault)
+ if (ZERO_OR_NULL_PTR(priv->data_vault))
goto out_free;
bin_attr_data_vault.private = priv->data_vault;
@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
goto free_imok;
}
- if (priv->data_vault) {
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
if (result)
@@ -615,7 +615,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_sysfs:
cleanup_odvp(priv);
if (priv->data_vault) {
- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
kfree(priv->data_vault);
}
free_uuid:
@@ -647,7 +648,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
- if (priv->data_vault)
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
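
The ZERO_OR_NULL_PTR() conversions above matter because kmemdup() of a zero-length GDDV buffer returns ZERO_SIZE_PTR, a small non-NULL poison value, so the old NULL checks treated an empty data vault as valid and later registered a sysfs group over garbage. A standalone demonstration of the sentinel, with constants mirroring the kernel's definitions:

#include <stdio.h>

#define ZERO_SIZE_PTR ((void *)16)	/* returned for 0-byte allocations */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR)

int main(void)
{
	void *vault = ZERO_SIZE_PTR;	/* what kmemdup(src, 0, ...) yields */

	printf("NULL check passes: %d\n", vault != NULL);	   /* 1: bogus */
	printf("ZERO_OR_NULL_PTR:  %d\n", ZERO_OR_NULL_PTR(vault)); /* 1 */
	return 0;
}
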
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index a9596e7562ea..95adac427b6f 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -81,7 +81,9 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
{}
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6a5d0ae5d7a4..50d50cec7774 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1329,6 +1329,7 @@ free_tz:
kfree(tz);
return ERR_PTR(result);
}
+EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
void *devdata, struct thermal_zone_device_ops *ops,
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 5018459e8dd9..3a8d6e747c25 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -813,12 +813,13 @@ static const struct attribute_group cooling_device_stats_attr_group = {
static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
{
+ const struct attribute_group *stats_attr_group = NULL;
struct cooling_dev_stats *stats;
unsigned long states;
int var;
if (cdev->ops->get_max_state(cdev, &states))
- return;
+ goto out;
states++; /* Total number of states is highest state + 1 */
@@ -828,7 +829,7 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
stats = kzalloc(var, GFP_KERNEL);
if (!stats)
- return;
+ goto out;
stats->time_in_state = (ktime_t *)(stats + 1);
stats->trans_table = (unsigned int *)(stats->time_in_state + states);
@@ -838,9 +839,12 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
spin_lock_init(&stats->lock);
+ stats_attr_group = &cooling_device_stats_attr_group;
+
+out:
/* Fill the empty slot left in cooling_device_attr_groups */
var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
- cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+ cooling_device_attr_groups[var] = stats_attr_group;
}
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
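
cooling_device_stats_setup() now leaves its reserved slot in cooling_device_attr_groups NULL when get_max_state() or the stats allocation fails; since the groups array is NULL-terminated, an unfilled slot simply ends the list before the stats entry instead of publishing attributes whose backing data is missing. A reduced sketch of the reserved-slot idiom (array contents hypothetical):

#include <stddef.h>

struct attribute_group { const char *name; };

static const struct attribute_group base_group  = { .name = "base"  };
static const struct attribute_group stats_group = { .name = "stats" };

static const struct attribute_group *cdev_groups[] = {
	&base_group,
	NULL,	/* reserved: left NULL, it terminates the list early */
	NULL,	/* terminator */
};

static void stats_setup(int setup_ok)
{
	/* Fill the slot just before the terminator, or leave it NULL. */
	size_t slot = sizeof(cdev_groups) / sizeof(cdev_groups[0]) - 2;

	cdev_groups[slot] = setup_ok ? &stats_group : NULL;
}
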
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index afb2d373dd47..81e7f64c1739 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -12,7 +12,7 @@
* (non hardware specific) changes to serial.c.
*
* The port is registered with the tty driver as minor device 64, and
- * therefore other ports should should only use 65 upwards.
+ * therefore other ports should only use 65 upwards.
*
* Richard Lucock 28/12/99
*
@@ -51,6 +51,7 @@
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
@@ -283,12 +284,12 @@ static void transmit_chars(struct serial_state *info)
amiga_custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
mb();
- info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit.tail = info->xmit.tail & (UART_XMIT_SIZE - 1);
info->icount.tx++;
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ UART_XMIT_SIZE) < WAKEUP_CHARS)
tty_wakeup(info->tport.tty);
#ifdef SERIAL_DEBUG_INTR
@@ -708,13 +709,13 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch)
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) == 0) {
+ UART_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head++] = ch;
- info->xmit.head &= SERIAL_XMIT_SIZE-1;
+ info->xmit.head &= UART_XMIT_SIZE - 1;
local_irq_restore(flags);
return 1;
}
@@ -753,15 +754,14 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE);
+ UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
- info->xmit.head = ((info->xmit.head + c) &
- (SERIAL_XMIT_SIZE-1));
+ info->xmit.head = (info->xmit.head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
@@ -788,14 +788,14 @@ static unsigned int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_SPACE(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static unsigned int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_CNT(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
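
The SERIAL_XMIT_SIZE to UART_XMIT_SIZE swap in this driver (and in mips_ejtag_fdc below) is purely mechanical, both are 4096, but the surrounding CIRC_* arithmetic is worth spelling out: it only works because the size is a power of two, letting head and tail wrap with a mask instead of a modulo. A self-contained illustration, with the macros copied in spirit from linux/circ_buf.h:

#include <stdio.h>

#define XMIT_SIZE 4096	/* must stay a power of two for the masks below */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned char buf[XMIT_SIZE];
	unsigned int head = 0, tail = 0;

	buf[head] = 'x';			/* producer enqueues a byte */
	head = (head + 1) & (XMIT_SIZE - 1);	/* index wraps for free */

	printf("queued=%u free=%u\n",
	       CIRC_CNT(head, tail, XMIT_SIZE),
	       CIRC_SPACE(head, tail, XMIT_SIZE));
	return 0;
}
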
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 31dceb5039b5..e81701a66429 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -916,7 +916,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
/* Make each port's xmit FIFO big enough to fill FDC TX FIFO */
- priv->xmit_size = min(tx_fifo * 4, (unsigned int)SERIAL_XMIT_SIZE);
+ priv->xmit_size = min(tx_fifo * 4, (unsigned int)UART_XMIT_SIZE);
driver = tty_alloc_driver(NUM_TTY_CHANNELS, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
@@ -1222,7 +1222,7 @@ static void kgdbfdc_push_one(void)
/* Construct a word from any data in buffer */
word = mips_ejtag_fdc_encode(bufs, &kgdbfdc_wbuflen, 1);
- /* Relocate any remaining data to beginnning of buffer */
+ /* Relocate any remaining data to beginning of buffer */
kgdbfdc_wbuflen -= word.bytes;
for (i = 0; i < kgdbfdc_wbuflen; ++i)
kgdbfdc_wbuf[i] = kgdbfdc_wbuf[i + word.bytes];
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fd4d24f61c46..caa5c14ed57f 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -5,6 +5,14 @@
*
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
+ * Outgoing path:
+ * tty -> DLCI fifo -> scheduler -> GSM MUX data queue ---o-> ldisc
+ *                  control message -> GSM MUX control queue --´
+ *
+ * Incoming path:
+ * ldisc -> gsm_queue() -o--> tty
+ *                       `-> gsm_control_response()
+ *
* TO DO:
* Mostly done: ioctls for setting modes/timing
* Partly done: hooks so you can pull off frames to non tty devs
@@ -210,6 +218,9 @@ struct gsm_mux {
/* Events on the GSM channel */
wait_queue_head_t event;
+ /* ldisc send work */
+ struct work_struct tx_work;
+
/* Bits for GSM mode decoding */
/* Framing Layer */
@@ -235,14 +246,17 @@ struct gsm_mux {
struct gsm_dlci *dlci[NUM_DLCI];
int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
+ bool has_devices; /* Devices were registered */
spinlock_t tx_lock;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
- struct list_head tx_list; /* Pending data packets */
+ struct list_head tx_ctrl_list; /* Pending control packets */
+ struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
+ struct timer_list kick_timer; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -369,6 +383,11 @@ static const u8 gsm_fcs8[256] = {
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl);
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
+static void gsmld_write_trigger(struct gsm_mux *gsm);
+static void gsmld_write_task(struct work_struct *work);
/**
* gsm_fcs_add - update FCS
@@ -420,6 +439,27 @@ static int gsm_read_ea(unsigned int *val, u8 c)
}
/**
+ * gsm_read_ea_val - read a value until EA
+ * @val: variable holding value
+ * @data: buffer of data
+ * @dlen: length of data
+ *
+ * Processes an EA value. Updates the passed variable and
+ * returns the processed data length.
+ */
+static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
+{
+ unsigned int len = 0;
+
+ for (; dlen > 0; dlen--) {
+ len++;
+ if (gsm_read_ea(val, *data++))
+ break;
+ }
+ return len;
+}
+
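
gsm_read_ea_val() builds on gsm_read_ea(): each octet contributes seven value bits, and a set EA bit (bit 0) marks the final octet of the field. An equivalent standalone decoder, plus a worked two-octet example:

#include <stdio.h>

#define EA 0x01	/* extension bit: set on the final octet of a field */

/* Accumulate 7 bits per octet; a set EA bit terminates the field.
 * Returns the number of octets consumed, like gsm_read_ea_val(). */
static unsigned int read_ea_val(unsigned int *val,
				const unsigned char *data, int dlen)
{
	unsigned int len = 0;

	*val = 0;
	while (dlen-- > 0) {
		unsigned char c = data[len++];

		*val = (*val << 7) | (c >> 1);
		if (c & EA)
			break;
	}
	return len;
}

int main(void)
{
	const unsigned char field[] = { 0x82, 0x3f };	/* EA clear, then set */
	unsigned int val;
	unsigned int used = read_ea_val(&val, field, sizeof(field));

	printf("val=0x%x used=%u\n", val, used);	/* val=0x209f used=2 */
	return 0;
}
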
+/**
* gsm_encode_modem - encode modem data bits
* @dlci: DLCI to encode from
*
@@ -464,6 +504,68 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
}
/**
+ * gsm_register_devices - register all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
+{
+ struct device *dev;
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return -EINVAL;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't register device 0 - this is the control channel
+ * and not a usable tty interface
+ */
+ dev = tty_register_device(gsm_tty_driver, base + i, NULL);
+ if (IS_ERR(dev)) {
+ if (debug & 8)
+ pr_info("%s failed to register device minor %u",
+ __func__, base + i);
+ for (i--; i >= 1; i--)
+ tty_unregister_device(gsm_tty_driver, base + i);
+ return PTR_ERR(dev);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * gsm_unregister_devices - unregister all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static void gsm_unregister_devices(struct tty_driver *driver,
+ unsigned int index)
+{
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't unregister device 0 - this is the control
+ * channel and not a usable tty interface
+ */
+ tty_unregister_device(gsm_tty_driver, base + i);
+ }
+}
+
+/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
@@ -570,57 +672,73 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
* @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
- * Format up and transmit a control frame. These do not go via the
- * queueing logic as they should be transmitted ahead of data when
- * they are needed.
- *
- * FIXME: Lock versus data TX path
+ * Format up and transmit a control frame. These should be transmitted
+ * ahead of data when they are needed.
*/
-
-static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
{
- int len;
- u8 cbuf[10];
- u8 ibuf[3];
+ struct gsm_msg *msg;
+ u8 *dp;
int ocr;
+ unsigned long flags;
+
+ msg = gsm_data_alloc(gsm, addr, 0, control);
+ if (!msg)
+ return -ENOMEM;
/* toggle C/R coding if not initiator */
ocr = cr ^ (gsm->initiator ? 0 : 1);
- switch (gsm->encoding) {
- case 0:
- cbuf[0] = GSM0_SOF;
- cbuf[1] = (addr << 2) | (ocr << 1) | EA;
- cbuf[2] = control;
- cbuf[3] = EA; /* Length of data = 0 */
- cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
- cbuf[5] = GSM0_SOF;
- len = 6;
- break;
- case 1:
- case 2:
- /* Control frame + packing (but not frame stuffing) in mode 1 */
- ibuf[0] = (addr << 2) | (ocr << 1) | EA;
- ibuf[1] = control;
- ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
- /* Stuffing may double the size worst case */
- len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
- /* Now add the SOF markers */
- cbuf[0] = GSM1_SOF;
- cbuf[len + 1] = GSM1_SOF;
- /* FIXME: we can omit the lead one in many cases */
- len += 2;
- break;
- default:
- WARN_ON(1);
- return;
- }
- gsmld_output(gsm, cbuf, len);
- if (!gsm->initiator) {
- cr = cr & gsm->initiator;
- control = control & ~PF;
+ msg->data -= 3;
+ dp = msg->data;
+ *dp++ = (addr << 2) | (ocr << 1) | EA;
+ *dp++ = control;
+
+ if (gsm->encoding == 0)
+ *dp++ = EA; /* Length of data = 0 */
+
+ *dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
+ msg->len = (dp - msg->data) + 1;
+
+ gsm_print_packet("Q->", addr, cr, control, NULL, 0);
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ gsm->tx_bytes += msg->len;
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
+
+ return 0;
+}
+
+/**
+ * gsm_dlci_clear_queues - remove outstanding data for a DLCI
+ * @gsm: mux
+ * @dlci: clear for this DLCI
+ *
+ * Clears the data queues for a given DLCI.
+ */
+static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg, *nmsg;
+ int addr = dlci->addr;
+ unsigned long flags;
+
+ /* Clear DLCI write fifo first */
+ spin_lock_irqsave(&dlci->lock, flags);
+ kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
+
+ /* Clear data packets in MUX write queue */
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ if (msg->addr != addr)
+ continue;
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
}
- gsm_print_packet("-->", addr, cr, control, NULL, 0);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
@@ -683,59 +801,151 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
}
/**
- * gsm_data_kick - poke the queue
+ * gsm_send_packet - sends a single packet
* @gsm: GSM Mux
- * @dlci: DLCI sending the data
+ * @msg: packet to send
*
- * The tty device has called us to indicate that room has appeared in
- * the transmit queue. Ram more data into the pipe if we have any
- * If we have been flow-stopped by a CMD_FCOFF, then we can only
- * send messages on DLCI0 until CMD_FCON
+ * The given packet is encoded and sent out. No memory is freed.
+ * The caller must hold the gsm tx lock.
+ */
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
+{
+ int len, ret;
+
+ if (gsm->encoding == 0) {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1, msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ } else {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ }
+
+ if (debug & 4)
+ gsm_hex_dump_bytes(__func__, gsm->txframe, len);
+ gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
+ msg->len);
+
+ ret = gsmld_output(gsm, gsm->txframe, len);
+ if (ret <= 0)
+ return ret;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_bytes -= msg->len;
+
+ return 0;
+}
+
+/**
+ * gsm_is_flow_ctrl_msg - checks if flow control message
+ * @msg: message to check
*
- * FIXME: lock against link layer control transmissions
+ * Returns true if the given message is a flow control command of the
+ * control channel. False is returned in any other case.
*/
+static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
+{
+ unsigned int cmd;
+
+ if (msg->addr > 0)
+ return false;
+
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ cmd = 0;
+ if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
+ break;
+ switch (cmd & ~PF) {
+ case CMD_FCOFF:
+ case CMD_FCON:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
-static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any.
+ * If we have been flow-stopped by a CMD_FCOFF, then we can only
+ * send messages on DLCI0 until CMD_FCON. The caller must hold
+ * the gsm tx lock.
+ */
+static int gsm_data_kick(struct gsm_mux *gsm)
{
struct gsm_msg *msg, *nmsg;
- int len;
+ struct gsm_dlci *dlci;
+ int ret;
- list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
- if (gsm->constipated && msg->addr)
- continue;
- if (gsm->encoding != 0) {
- gsm->txframe[0] = GSM1_SOF;
- len = gsm_stuff_frame(msg->data,
- gsm->txframe + 1, msg->len);
- gsm->txframe[len + 1] = GSM1_SOF;
- len += 2;
- } else {
- gsm->txframe[0] = GSM0_SOF;
- memcpy(gsm->txframe + 1 , msg->data, msg->len);
- gsm->txframe[msg->len + 1] = GSM0_SOF;
- len = msg->len + 2;
- }
+ clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
- if (debug & 4)
- gsm_hex_dump_bytes(__func__, gsm->txframe, len);
- if (gsmld_output(gsm, gsm->txframe, len) <= 0)
+ /* Serialize control messages and control channel messages first */
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) {
+ if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
+ continue;
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
break;
- /* FIXME: Can eliminate one SOF in many more cases */
- gsm->tx_bytes -= msg->len;
-
- list_del(&msg->list);
- kfree(msg);
+ }
+ }
- if (dlci) {
- tty_port_tty_wakeup(&dlci->port);
- } else {
- int i = 0;
+ if (gsm->constipated)
+ return -EAGAIN;
- for (i = 0; i < NUM_DLCI; i++)
- if (gsm->dlci[i])
- tty_port_tty_wakeup(&gsm->dlci[i]->port);
+ /* Serialize other channels */
+ if (list_empty(&gsm->tx_data_list))
+ return 0;
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ dlci = gsm->dlci[msg->addr];
+ /* Send only messages for DLCIs with valid state */
+ if (dlci->state != DLCI_OPEN) {
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ }
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ break;
}
}
+
+ return 1;
}
/**
@@ -784,9 +994,22 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
msg->data = dp;
/* Add to the actual output queue */
- list_add_tail(&msg->list, &gsm->tx_list);
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ if (msg->addr > 0) {
+ list_add_tail(&msg->list, &gsm->tx_data_list);
+ break;
+ }
+ fallthrough;
+ default:
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ break;
+ }
gsm->tx_bytes += msg->len;
- gsm_data_kick(gsm, dlci);
+
+ gsmld_write_trigger(gsm);
+ mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
}
/**
@@ -823,41 +1046,48 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
- int len, total_size, size;
- int h = dlci->adaption - 1;
+ int h, len, size;
- total_size = 0;
- while (1) {
- len = kfifo_len(&dlci->fifo);
- if (len == 0)
- return total_size;
-
- /* MTU/MRU count only the data bits */
- if (len > gsm->mtu)
- len = gsm->mtu;
-
- size = len + h;
-
- msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
- if (msg == NULL)
- return -ENOMEM;
- dp = msg->data;
- switch (dlci->adaption) {
- case 1: /* Unstructured */
- break;
- case 2: /* Unstructed with modem bits.
- Always one byte as we never send inline break data */
- *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
- break;
- }
- WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
- __gsm_data_queue(dlci, msg);
- total_size += size;
+ /* for modem bits without break data */
+ h = ((dlci->adaption == 1) ? 0 : 1);
+
+ len = kfifo_len(&dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits but watch adaption mode */
+ if ((len + h) > gsm->mtu)
+ len = gsm->mtu - h;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ if (!msg)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits.
+ * Always one byte as we never send inline break data
+ */
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ break;
+ default:
+ pr_err("%s: unsupported adaption %d\n", __func__,
+ dlci->adaption);
+ break;
}
+
+ WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len,
+ &dlci->lock));
+
+ /* Notify upper layer about available send space. */
+ tty_port_tty_wakeup(&dlci->port);
+
+ __gsm_data_queue(dlci, msg);
/* Bytes of data we used up */
- return total_size;
+ return size;
}
/**
@@ -908,9 +1138,6 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
size = len + overhead;
msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
-
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
if (msg == NULL) {
skb_queue_tail(&dlci->skb_list, dlci->skb);
dlci->skb = NULL;
@@ -1006,32 +1233,43 @@ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
* renegotiate DLCI priorities with optional stuff. Needs optimising.
*/
-static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
{
- int len;
/* Priority ordering: We should do priority with RR of the groups */
- int i = 1;
-
- while (i < NUM_DLCI) {
- struct gsm_dlci *dlci;
+ int i, len, ret = 0;
+ bool sent;
+ struct gsm_dlci *dlci;
- if (gsm->tx_bytes > TX_THRESH_HI)
- break;
- dlci = gsm->dlci[i];
- if (dlci == NULL || dlci->constipated) {
- i++;
- continue;
+ while (gsm->tx_bytes < TX_THRESH_HI) {
+ for (sent = false, i = 1; i < NUM_DLCI; i++) {
+ dlci = gsm->dlci[i];
+ /* skip unused or blocked channel */
+ if (!dlci || dlci->constipated)
+ continue;
+ /* skip channels with invalid state */
+ if (dlci->state != DLCI_OPEN)
+ continue;
+ /* count the sent data per adaption */
+ if (dlci->adaption < 3 && !dlci->net)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ /* on error exit */
+ if (len < 0)
+ return ret;
+ if (len > 0) {
+ ret++;
+ sent = true;
+ /* The lower DLCs can starve the higher DLCs! */
+ break;
+ }
+ /* try next */
}
- if (dlci->adaption < 3 && !dlci->net)
- len = gsm_dlci_data_output(gsm, dlci);
- else
- len = gsm_dlci_data_output_framed(gsm, dlci);
- if (len < 0)
+ if (!sent)
break;
- /* DLCI empty - try the next */
- if (len == 0)
- i++;
- }
+ }
+
+ return ret;
}
/**
@@ -1277,7 +1515,6 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
const u8 *data, int clen)
{
u8 buf[1];
- unsigned long flags;
switch (command) {
case CMD_CLD: {
@@ -1299,9 +1536,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
gsm->constipated = false;
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
break;
case CMD_FCOFF:
/* Modem wants us to STFU */
@@ -1407,7 +1642,7 @@ static void gsm_control_retransmit(struct timer_list *t)
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- if (gsm->cretries == 0) {
+ if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
ctrl->done = 1;
@@ -1504,25 +1739,24 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
- unsigned long flags;
-
del_timer(&dlci->t1);
if (debug & 8)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
+ /* Prevent us from sending data before the link is up again */
+ dlci->constipated = true;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
- spin_lock_irqsave(&dlci->lock, flags);
- kfifo_reset(&dlci->fifo);
- spin_unlock_irqrestore(&dlci->lock, flags);
+ gsm_dlci_clear_queues(dlci->gsm, dlci);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, 0);
wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
- wake_up(&dlci->gsm->event);
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
+ gsm_dlci_data_kick(dlci);
+ wake_up(&dlci->gsm->event);
}
/**
@@ -1539,11 +1773,13 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
del_timer(&dlci->t1);
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
+ dlci->constipated = false;
if (debug & 8)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr)
gsm_modem_update(dlci, 0);
+ gsm_dlci_data_kick(dlci);
wake_up(&dlci->gsm->event);
}
@@ -1569,8 +1805,8 @@ static void gsm_dlci_t1(struct timer_list *t)
switch (dlci->state) {
case DLCI_OPENING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
@@ -1585,8 +1821,8 @@ static void gsm_dlci_t1(struct timer_list *t)
break;
case DLCI_CLOSING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else
@@ -1620,6 +1856,25 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
}
/**
+ * gsm_dlci_set_opening - change state to opening
+ * @dlci: DLCI to open
+ *
+ * Change internal state to wait for DLCI open from initiator side.
+ * We set off timers and responses upon reception of an SABM.
+ */
+static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
+{
+ switch (dlci->state) {
+ case DLCI_CLOSED:
+ case DLCI_CLOSING:
+ dlci->state = DLCI_OPENING;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* gsm_dlci_begin_close - start channel close procedure
* @dlci: DLCI to open
*
@@ -1728,6 +1983,30 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
}
}
+/**
+ * gsm_kick_timer - transmit if possible
+ * @t: timer contained in our gsm object
+ *
+ * Transmit data from DLCIs if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+static void gsm_kick_timer(struct timer_list *t)
+{
+ struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
+ unsigned long flags;
+ int sent = 0;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ if (gsm->tx_bytes < TX_THRESH_LO)
+ sent = gsm_dlci_data_sweep(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ if (sent && debug & 4)
+ pr_info("%s TX queue stalled\n", __func__);
+}
+
/*
* Allocate/Free DLCI channels
*/
@@ -1762,10 +2041,13 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
dlci->addr = addr;
dlci->adaption = gsm->adaption;
dlci->state = DLCI_CLOSED;
- if (addr)
+ if (addr) {
dlci->data = gsm_dlci_data;
- else
+ /* Prevent us from sending data before the link is up */
+ dlci->constipated = true;
+ } else {
dlci->data = gsm_dlci_command;
+ }
gsm->dlci[addr] = dlci;
return dlci;
}
@@ -1925,7 +2207,7 @@ static void gsm_queue(struct gsm_mux *gsm)
case UIH:
case UIH|PF:
if (dlci == NULL || dlci->state != DLCI_OPEN) {
- gsm_command(gsm, address, DM|PF);
+ gsm_response(gsm, address, DM|PF);
return;
}
dlci->data(dlci, gsm->buf, gsm->len);
@@ -2048,7 +2330,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
} else if ((c & ISO_IEC_646_MASK) == XOFF) {
gsm->constipated = false;
/* Kick the link in case it is idling */
- gsm_data_kick(gsm, NULL);
+ gsmld_write_trigger(gsm);
return;
}
if (c == GSM1_SOF) {
@@ -2176,18 +2458,29 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
+ del_timer_sync(&gsm->kick_timer);
del_timer_sync(&gsm->t2_timer);
+ /* Finish writing to ldisc */
+ flush_work(&gsm->tx_work);
+
/* Free up any link layer users and finally the control channel */
+ if (gsm->has_devices) {
+ gsm_unregister_devices(gsm_tty_driver, gsm->num);
+ gsm->has_devices = false;
+ }
for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
- list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
+ kfree(txq);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list)
kfree(txq);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
}
/**
@@ -2202,8 +2495,15 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
+ int ret;
+
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
+ timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
@@ -2213,9 +2513,11 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
else
gsm->receive = gsm1_receive;
- dlci = gsm_dlci_alloc(gsm, 0);
- if (dlci == NULL)
- return -ENOMEM;
+ ret = gsm_register_devices(gsm_tty_driver, gsm->num);
+ if (ret)
+ return ret;
+
+ gsm->has_devices = true;
gsm->dead = false; /* Tty opens are now permissible */
return 0;
}
@@ -2308,7 +2610,8 @@ static struct gsm_mux *gsm_alloc_mux(void)
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
kref_init(&gsm->ref);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2465,6 +2768,47 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
return gsm->tty->ops->write(gsm->tty, data, len);
}
+
+/**
+ * gsmld_write_trigger - schedule ldisc write task
+ * @gsm: our mux
+ */
+static void gsmld_write_trigger(struct gsm_mux *gsm)
+{
+ if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead)
+ return;
+ schedule_work(&gsm->tx_work);
+}
+
+/**
+ * gsmld_write_task - ldisc write task
+ * @work: our tx write work
+ *
+ * Writes out data to the ldisc if possible. We are doing this here to
+ * avoid deadlocking. It returns once no space or data is left for output.
+ */
+static void gsmld_write_task(struct work_struct *work)
+{
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
+ unsigned long flags;
+ int i, ret;
+
+ /* All outstanding control channel and control messages, plus one
+ * data frame, are sent.
+ */
+ ret = -ENODEV;
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ if (gsm->tty)
+ ret = gsm_data_kick(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ if (ret >= 0)
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ tty_port_tty_wakeup(&gsm->dlci[i]->port);
+}
+
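
gsmld_write_trigger()/gsmld_write_task() are the heart of this series: every producer (timers, control handlers, flow-control events, the tty wakeup path) only schedules work, and the single work item takes tx_lock, drains via gsm_data_kick() and then wakes the DLCI ports. That keeps ldisc writes out of the callers' lock contexts. A compressed, hypothetical model of the split, with a flag standing in for schedule_work():

#include <pthread.h>
#include <stdbool.h>

struct mux {
	pthread_mutex_t tx_lock;
	bool dead;
	bool tx_pending;		/* stands in for the work item */
	int (*kick)(struct mux *gsm);	/* drains ctrl + data queues */
};

/* Producers call this from any context; it never writes inline. */
static void write_trigger(struct mux *gsm)
{
	if (gsm->dead)
		return;
	gsm->tx_pending = true;		/* schedule_work(&gsm->tx_work) */
}

/* The worker owns the locking, so producers impose no lock order. */
static void write_task(struct mux *gsm)
{
	int ret;

	pthread_mutex_lock(&gsm->tx_lock);
	ret = gsm->kick(gsm);
	pthread_mutex_unlock(&gsm->tx_lock);

	if (ret >= 0)
		gsm->tx_pending = false;	/* DLCI ports woken here */
}
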
/**
* gsmld_attach_gsm - mode set up
* @tty: our tty structure
@@ -2475,39 +2819,14 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
* will need moving to an ioctl path.
*/
-static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base;
- int ret, i;
-
gsm->tty = tty_kref_get(tty);
/* Turn off tty XON/XOFF handling to handle it explicitly. */
gsm->old_c_iflag = tty->termios.c_iflag;
tty->termios.c_iflag &= (IXON | IXOFF);
- ret = gsm_activate_mux(gsm);
- if (ret != 0)
- tty_kref_put(gsm->tty);
- else {
- /* Don't register device 0 - this is the control channel and not
- a usable tty interface */
- base = mux_num_to_base(gsm); /* Base for this MUX */
- for (i = 1; i < NUM_DLCI; i++) {
- struct device *dev;
-
- dev = tty_register_device(gsm_tty_driver,
- base + i, NULL);
- if (IS_ERR(dev)) {
- for (i--; i >= 1; i--)
- tty_unregister_device(gsm_tty_driver,
- base + i);
- return PTR_ERR(dev);
- }
- }
- }
- return ret;
}
-
/**
* gsmld_detach_gsm - stop doing 0710 mux
* @tty: tty attached to the mux
@@ -2518,12 +2837,7 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base = mux_num_to_base(gsm); /* Base for this MUX */
- int i;
-
WARN_ON(tty != gsm->tty);
- for (i = 1; i < NUM_DLCI; i++)
- tty_unregister_device(gsm_tty_driver, base + i);
/* Restore tty XON/XOFF handling. */
gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
@@ -2615,7 +2929,6 @@ static void gsmld_close(struct tty_struct *tty)
static int gsmld_open(struct tty_struct *tty)
{
struct gsm_mux *gsm;
- int ret;
if (tty->ops->write == NULL)
return -EINVAL;
@@ -2631,12 +2944,13 @@ static int gsmld_open(struct tty_struct *tty)
/* Attach the initial passive connection */
gsm->encoding = 1;
- ret = gsmld_attach_gsm(tty, gsm);
- if (ret != 0) {
- gsm_cleanup_mux(gsm, false);
- mux_put(gsm);
- }
- return ret;
+ gsmld_attach_gsm(tty, gsm);
+
+ timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
+
+ return 0;
}
/**
@@ -2651,16 +2965,9 @@ static int gsmld_open(struct tty_struct *tty)
static void gsmld_write_wakeup(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
/* Queue poll */
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- if (gsm->tx_bytes < TX_THRESH_LO) {
- gsm_dlci_data_sweep(gsm);
- }
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
}
/**
@@ -2704,11 +3011,24 @@ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
- int space = tty_write_room(tty);
+ struct gsm_mux *gsm = tty->disc_data;
+ unsigned long flags;
+ int space;
+ int ret;
+
+ if (!gsm)
+ return -ENODEV;
+
+ ret = -ENOBUFS;
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ space = tty_write_room(tty);
if (space >= nr)
- return tty->ops->write(tty, buf, nr);
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- return -ENOBUFS;
+ ret = tty->ops->write(tty, buf, nr);
+ else
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ return ret;
}
/**
@@ -2733,12 +3053,15 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
+
+ if (gsm->dead)
+ mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (gsm->dead)
- mask |= EPOLLHUP;
return mask;
}
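
With the checks reordered above, a dead mux, a hangup, or TTY_OTHER_CLOSED all surface as EPOLLHUP even while write room is still reported. A minimal userspace consumer of that contract; /dev/gsmtty1 is an assumed device node, any tty fd behaves the same:

    #include <poll.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        struct pollfd pfd;

        pfd.fd = open("/dev/gsmtty1", O_RDWR | O_NOCTTY); /* assumed node */
        if (pfd.fd < 0) {
            perror("open");
            return 1;
        }
        pfd.events = POLLIN | POLLOUT;

        if (poll(&pfd, 1, 1000) > 0) {
            if (pfd.revents & POLLHUP)
                puts("mux dead or peer closed");  /* the EPOLLHUP paths above */
            if (pfd.revents & POLLOUT)
                puts("write room available");
        }
        return 0;
    }
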
@@ -3174,6 +3497,8 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
/* Start sending off SABM messages */
if (gsm->initiator)
gsm_dlci_begin_open(dlci);
+ else
+ gsm_dlci_set_opening(dlci);
/* And wait for virtual carrier */
return tty_port_block_til_ready(port, tty, filp);
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 640c9e871044..3afdd9033a9c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -118,6 +118,9 @@ struct n_tty_data {
size_t read_tail;
size_t line_start;
+ /* # of chars looked ahead (to find software flow control chars) */
+ size_t lookahead_count;
+
/* protected by output lock */
unsigned int column;
unsigned int canon_column;
@@ -333,6 +336,8 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->push = 0;
+
+ ldata->lookahead_count = 0;
}
static void n_tty_packet_mode_flush(struct tty_struct *tty)
@@ -1225,12 +1230,30 @@ static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
return c == START_CHAR(tty) || c == STOP_CHAR(tty);
}
-/* Returns true if c is consumed as flow-control character */
-static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+/**
+ * n_tty_receive_char_flow_ctrl - receive flow control chars
+ * @tty: terminal device
+ * @c: character
+ * @lookahead_done: lookahead has processed this character already
+ *
+ * Receive and process flow control character actions.
+ *
+ * If the lookahead for flow control characters has already handled this
+ * character ahead of the normal receive path, the actions are skipped
+ * during normal receive.
+ *
+ * Returns true if @c was consumed as a flow-control character; in that
+ * case it must not be treated as a normal character.
+ */
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (!n_tty_is_char_flow_ctrl(tty, c))
return false;
+ if (lookahead_done)
+ return true;
+
if (c == START_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
@@ -1242,11 +1265,12 @@ static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c
return true;
}
-static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
- if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c))
+ if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c, lookahead_done))
return;
if (L_ISIG(tty)) {
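
n_tty_receive_char_flow_ctrl() acts only when IXON is set, so whether ^S/^Q are interpreted at all is a termios property of the tty. Enabling it from userspace is plain termios (standard API, shown on stdin):

    #include <termios.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct termios tio;

        if (tcgetattr(STDIN_FILENO, &tio))
            return 1;
        tio.c_iflag |= IXON;            /* honor STOP/START characters */
        tio.c_cc[VSTOP]  = 0x13;        /* ^S, the usual default */
        tio.c_cc[VSTART] = 0x11;        /* ^Q */
        if (tcsetattr(STDIN_FILENO, TCSANOW, &tio))
            return 1;
        printf("IXON enabled: ^S pauses output, ^Q resumes\n");
        return 0;
    }
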
@@ -1401,7 +1425,8 @@ static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
put_tty_queue(c, ldata);
}
-static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (I_ISTRIP(tty))
c &= 0x7f;
@@ -1409,12 +1434,10 @@ static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
c = tolower(c);
if (I_IXON(tty)) {
- if (c == STOP_CHAR(tty))
- stop_tty(tty);
- else if (c == START_CHAR(tty) ||
- (tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
- c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
- c != SUSP_CHAR(tty))) {
+ if (!n_tty_receive_char_flow_ctrl(tty, c, lookahead_done) &&
+ tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
+ c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
+ c != SUSP_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
}
@@ -1457,6 +1480,27 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
n_tty_receive_char_flagged(tty, c, flag);
}
+/* Caller must ensure count > 0 */
+static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ unsigned char flag = TTY_NORMAL;
+
+ ldata->lookahead_count += count;
+
+ if (!I_IXON(tty))
+ return;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL))
+ n_tty_receive_char_flow_ctrl(tty, *cp, false);
+ cp++;
+ }
+}
+
static void
n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count)
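
The lookahead pass above acts on flow-control characters early and counts how many bytes it pre-scanned; __receive_buf() later splits its input at that count so the same byte is never acted on twice. A standalone sketch of that split, assuming the conventional XON/XOFF values 0x11/0x13:

    #include <stdio.h>
    #include <stddef.h>

    #define XON  0x11
    #define XOFF 0x13

    static size_t lookahead;   /* bytes already pre-scanned */

    static void lookahead_scan(const unsigned char *buf, size_t n)
    {
        lookahead += n;
        for (size_t i = 0; i < n; i++)
            if (buf[i] == XOFF || buf[i] == XON)
                printf("flow ctrl 0x%02x acted on early\n", buf[i]);
    }

    static void receive(const unsigned char *buf, size_t n)
    {
        size_t la = n < lookahead ? n : lookahead; /* min(count, lookahead) */

        (void)buf;
        /* first la bytes: flow-ctrl actions already done, skip them */
        printf("%zu bytes with lookahead_done=true, %zu fresh\n", la, n - la);
        lookahead -= la;
    }

    int main(void)
    {
        const unsigned char data[] = { 'a', XOFF, 'b', XON };

        lookahead_scan(data, sizeof(data)); /* e.g. driver-side pre-scan */
        receive(data, sizeof(data));
        return 0;
    }
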
@@ -1496,7 +1540,7 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
static void
n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+ const char *fp, int count, bool lookahead_done)
{
char flag = TTY_NORMAL;
@@ -1504,12 +1548,12 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
if (fp)
flag = *fp++;
if (likely(flag == TTY_NORMAL))
- n_tty_receive_char_closing(tty, *cp++);
+ n_tty_receive_char_closing(tty, *cp++, lookahead_done);
}
}
static void n_tty_receive_buf_standard(struct tty_struct *tty,
- const unsigned char *cp, const char *fp, int count)
+ const unsigned char *cp, const char *fp, int count, bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
char flag = TTY_NORMAL;
@@ -1540,7 +1584,7 @@ static void n_tty_receive_buf_standard(struct tty_struct *tty,
}
if (test_bit(c, ldata->char_map))
- n_tty_receive_char_special(tty, c);
+ n_tty_receive_char_special(tty, c, lookahead_done);
else
n_tty_receive_char(tty, c);
}
@@ -1551,21 +1595,30 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
+ size_t la_count = min_t(size_t, ldata->lookahead_count, count);
if (ldata->real_raw)
n_tty_receive_buf_real_raw(tty, cp, fp, count);
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
- else if (tty->closing && !L_EXTPROC(tty))
- n_tty_receive_buf_closing(tty, cp, fp, count);
- else {
- n_tty_receive_buf_standard(tty, cp, fp, count);
+ else if (tty->closing && !L_EXTPROC(tty)) {
+ if (la_count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ } else {
+ if (la_count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
+ ldata->lookahead_count -= la_count;
+
if (ldata->icanon && !L_EXTPROC(tty))
return;
@@ -2446,6 +2499,7 @@ static struct tty_ldisc_ops n_tty_ops = {
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup,
.receive_buf2 = n_tty_receive_buf2,
+ .lookahead_buf = n_tty_lookahead_flow_ctrl,
};
/**
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 696030cfcb09..287153d32536 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -123,6 +123,26 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value)
up->port.serial_out(&up->port, offset, value);
}
+/**
+ * serial_lsr_in - Read LSR register and preserve flags across reads
+ * @up: uart 8250 port
+ *
+ * Read LSR register and handle saving non-preserved flags across reads.
+ * The flags that are not preserved across reads are stored into
+ * up->lsr_saved_flags.
+ *
+ * Returns the LSR value ORed with the preserved flags (if any).
+ */
+static inline u16 serial_lsr_in(struct uart_8250_port *up)
+{
+ u16 lsr = up->lsr_saved_flags;
+
+ lsr |= serial_in(up, UART_LSR);
+ up->lsr_saved_flags = lsr & up->lsr_save_mask;
+
+ return lsr;
+}
+
/*
* For the 16C950
*/
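
serial_lsr_in() exists because some LSR bits are clear-on-read: the first reader consumes them, so they are folded into lsr_saved_flags for later readers. The same pattern in miniature, with a mocked register and illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    #define LSR_BI   0x10   /* break: cleared by reading the register */
    #define LSR_THRE 0x20

    static uint16_t saved_flags, save_mask = LSR_BI;
    static uint16_t hw = LSR_BI | LSR_THRE;

    static uint16_t read_lsr_hw(void)   /* mock: BI is clear-on-read */
    {
        uint16_t v = hw;

        hw &= ~LSR_BI;
        return v;
    }

    static uint16_t lsr_in(void)        /* mirrors serial_lsr_in() */
    {
        uint16_t lsr = saved_flags | read_lsr_hw();

        saved_flags = lsr & save_mask;  /* keep consumable bits around */
        return lsr;
    }

    int main(void)
    {
        printf("first  read: 0x%02x\n", lsr_in()); /* 0x30: BI visible */
        printf("second read: 0x%02x\n", lsr_in()); /* BI still reported */
        return 0;
    }
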
@@ -183,10 +203,12 @@ void serial8250_rpm_put(struct uart_8250_port *p);
void serial8250_rpm_get_tx(struct uart_8250_port *p);
void serial8250_rpm_put_tx(struct uart_8250_port *p);
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485);
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
void serial8250_em485_start_tx(struct uart_8250_port *p);
void serial8250_em485_stop_tx(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
+extern struct serial_rs485 serial8250_em485_supported;
/* MCR <-> TIOCM conversion */
static inline int serial8250_TIOCM_to_MCR(int tiocm)
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 2a1226a78a0c..15a2387a5b25 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -108,6 +108,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
UPF_SKIP_TEST | UPF_IOREMAP;
up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = bcm2835aux_rs485_start_tx;
up.rs485_stop_tx = bcm2835aux_rs485_stop_tx;
@@ -166,8 +167,10 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
uartclk = clk_get_rate(data->clk);
if (!uartclk) {
ret = device_property_read_u32(&pdev->dev, "clock-frequency", &uartclk);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ goto dis_clk;
+ }
}
/* the HW-clock divider for bcm2835aux is 8,
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 9b878d023dac..8efdc271eb75 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1139,16 +1139,19 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
-
- serial8250_suspend_port(priv->line);
- clk_disable_unprepare(priv->baud_mux_clk);
+ unsigned long flags;
/*
* This will prevent resume from enabling RTS before the
- * baud rate has been resored.
+ * baud rate has been restored.
*/
+ spin_lock_irqsave(&port->lock, flags);
priv->saved_mctrl = port->mctrl;
- port->mctrl = 0;
+ port->mctrl &= ~TIOCM_RTS;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ serial8250_suspend_port(priv->line);
+ clk_disable_unprepare(priv->baud_mux_clk);
return 0;
}
@@ -1158,6 +1161,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
+ unsigned long flags;
int ret;
ret = clk_prepare_enable(priv->baud_mux_clk);
@@ -1180,7 +1184,15 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
start_rx_dma(serial8250_get_port(priv->line));
}
serial8250_resume_port(priv->line);
- port->mctrl = priv->saved_mctrl;
+
+ if (priv->saved_mctrl & TIOCM_RTS) {
+ /* Restore RTS */
+ spin_lock_irqsave(&port->lock, flags);
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
return 0;
}
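
The suspend path now clears only TIOCM_RTS instead of zeroing the whole mctrl word, and resume re-asserts the bit under the port lock. From userspace the same modem-control bit is reachable through the standard ioctls; /dev/ttyS0 is an assumed port:

    #include <sys/ioctl.h>
    #include <termios.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        int rts = TIOCM_RTS;
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY); /* assumed port */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        ioctl(fd, TIOCMBIC, &rts);   /* drop RTS */
        ioctl(fd, TIOCMBIS, &rts);   /* raise RTS again */
        return 0;
    }
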
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 3f56dbc9432b..2e83e7367441 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -277,8 +277,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
* the "Diva" UART used on the management processor on many HP
* ia64 and parisc boxes.
*/
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
(!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
(lsr & UART_LSR_THRE)) {
@@ -1008,9 +1007,11 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
uart->port.throttle = up->port.throttle;
uart->port.unthrottle = up->port.unthrottle;
uart->port.rs485_config = up->port.rs485_config;
+ uart->port.rs485_supported = up->port.rs485_supported;
uart->port.rs485 = up->port.rs485;
uart->rs485_start_tx = up->rs485_start_tx;
uart->rs485_stop_tx = up->rs485_stop_tx;
+ uart->lsr_save_mask = up->lsr_save_mask;
uart->dma = up->dma;
/* Take tx_loadsz from fifosize if it wasn't set separately */
@@ -1098,6 +1099,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
ret = 0;
}
+ if (!uart->lsr_save_mask)
+ uart->lsr_save_mask = LSR_SAVE_FLAGS; /* Use default LSR mask */
+
/* Initialise interrupt backoff work if required */
if (up->overrun_backoff_time_ms > 0) {
uart->overrun_backoff_time_ms =
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index bb6aca07ab56..a604b42e4458 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -9,26 +9,27 @@
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
*/
+#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <linux/clk.h>
#include <linux/reset.h>
-#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+
#include "8250_dwlib.h"
/* Offsets for the DesignWare specific registers */
@@ -82,8 +83,21 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
static void dw8250_force_idle(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
+ unsigned int lsr;
serial8250_clear_and_reinit_fifos(up);
+
+ /*
+ * With the PSLVERR_RESP_EN parameter set to 1, the device generates an
+ * error response when an attempt is made to read an empty RBR with the
+ * FIFO enabled.
+ */
+ if (up->fcr & UART_FCR_ENABLE_FIFO) {
+ lsr = p->serial_in(p, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return;
+ }
+
(void)p->serial_in(p, UART_RX);
}
@@ -122,12 +136,15 @@ static void dw8250_check_lcr(struct uart_port *p, int value)
/* Returns once the transmitter is empty or we run out of retries */
static void dw8250_tx_wait_empty(struct uart_port *p)
{
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int tries = 20000;
unsigned int delay_threshold = tries - 1000;
unsigned int lsr;
while (tries--) {
lsr = readb (p->membase + (UART_LSR << p->regshift));
+ up->lsr_saved_flags |= lsr & up->lsr_save_mask;
+
if (lsr & UART_LSR_TEMT)
break;
@@ -140,29 +157,23 @@ static void dw8250_tx_wait_empty(struct uart_port *p)
}
}
-static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = to_dw8250_data(p->private_data);
- /* Allow the TX to drain before we reconfigure */
- if (offset == UART_LCR)
- dw8250_tx_wait_empty(p);
-
writeb(value, p->membase + (offset << p->regshift));
if (offset == UART_LCR && !d->uart_16550_compatible)
dw8250_check_lcr(p, value);
}
-
-static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
- writeb(value, p->membase + (offset << p->regshift));
+ /* Allow the TX to drain before we reconfigure */
+ if (offset == UART_LCR)
+ dw8250_tx_wait_empty(p);
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_serial_out(p, offset, value);
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -253,7 +264,7 @@ static int dw8250_handle_irq(struct uart_port *p)
*/
if (!up->dma && rx_timeout) {
spin_lock_irqsave(&p->lock, flags);
- status = p->serial_in(p, UART_LSR);
+ status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
(void) p->serial_in(p, UART_RX);
@@ -263,7 +274,10 @@ static int dw8250_handle_irq(struct uart_port *p)
/* Manually stop the Rx DMA transfer when acting as flow controller */
if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
- status = p->serial_in(p, UART_LSR);
+ spin_lock_irqsave(&p->lock, flags);
+ status = serial_lsr_in(up);
+ spin_unlock_irqrestore(&p->lock, flags);
+
if (status & (UART_LSR_DR | UART_LSR_BI)) {
dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
dw8250_writel_ext(p, DW_UART_DMASA, 1);
@@ -688,7 +702,6 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -706,9 +719,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -730,11 +741,10 @@ static int dw8250_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops dw8250_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
- SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
+ RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
};
static const struct dw8250_platform_data dw8250_dw_apb = {
@@ -792,7 +802,7 @@ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
static struct platform_driver dw8250_platform_driver = {
.driver = {
.name = "dw-apb-uart",
- .pm = &dw8250_pm_ops,
+ .pm = pm_ptr(&dw8250_pm_ops),
.of_match_table = dw8250_of_match,
.acpi_match_table = dw8250_acpi_match,
},
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index fbabfdd8c7b8..dbe4d44f60d4 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -3,8 +3,10 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/property.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
@@ -16,9 +18,18 @@
#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */
#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */
#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
+#define DW_UART_RAR 0xc4 /* Receive Address Register */
+#define DW_UART_TAR 0xc8 /* Transmit Address Register */
+#define DW_UART_LCR_EXT 0xcc /* Line Extended Control Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
+/* Receive / Transmit Address Register bits */
+#define DW_UART_ADDR_MASK GENMASK(7, 0)
+
+/* Line Status Register bits */
+#define DW_UART_LSR_ADDR_RCVD BIT(8)
+
/* Transceiver Control Register bits */
#define DW_UART_TCR_RS485_EN BIT(0)
#define DW_UART_TCR_RE_POL BIT(1)
@@ -28,22 +39,28 @@
#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 1)
#define DW_UART_TCR_XFER_MODE_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 2)
+/* Line Extended Control Register bits */
+#define DW_UART_LCR_EXT_DLS_E BIT(0)
+#define DW_UART_LCR_EXT_ADDR_MATCH BIT(1)
+#define DW_UART_LCR_EXT_SEND_ADDR BIT(2)
+#define DW_UART_LCR_EXT_TRANSMIT_MODE BIT(3)
+
/* Component Parameter Register bits */
-#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
-#define DW_UART_CPR_AFCE_MODE (1 << 4)
-#define DW_UART_CPR_THRE_MODE (1 << 5)
-#define DW_UART_CPR_SIR_MODE (1 << 6)
-#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
-#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
-#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
-#define DW_UART_CPR_FIFO_STAT (1 << 10)
-#define DW_UART_CPR_SHADOW (1 << 11)
-#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
-#define DW_UART_CPR_DMA_EXTRA (1 << 13)
-#define DW_UART_CPR_FIFO_MODE (0xff << 16)
+#define DW_UART_CPR_ABP_DATA_WIDTH GENMASK(1, 0)
+#define DW_UART_CPR_AFCE_MODE BIT(4)
+#define DW_UART_CPR_THRE_MODE BIT(5)
+#define DW_UART_CPR_SIR_MODE BIT(6)
+#define DW_UART_CPR_SIR_LP_MODE BIT(7)
+#define DW_UART_CPR_ADDITIONAL_FEATURES BIT(8)
+#define DW_UART_CPR_FIFO_ACCESS BIT(9)
+#define DW_UART_CPR_FIFO_STAT BIT(10)
+#define DW_UART_CPR_SHADOW BIT(11)
+#define DW_UART_CPR_ENCODED_PARMS BIT(12)
+#define DW_UART_CPR_DMA_EXTRA BIT(13)
+#define DW_UART_CPR_FIFO_MODE GENMASK(23, 16)
/* Helper for FIFO size calculation */
-#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+#define DW_UART_CPR_FIFO_SIZE(a) (FIELD_GET(DW_UART_CPR_FIFO_MODE, (a)) * 16)
/*
* divisor = div(I) + div(F)
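
The CPR conversion above swaps hand-rolled shifts for BIT()/GENMASK() plus FIELD_GET() from linux/bits.h and linux/bitfield.h. A userspace demonstration of the semantics, with deliberately simplified stand-in macros (not the kernel's definitions):

    #include <stdio.h>
    #include <stdint.h>

    /* simplified stand-ins for the kernel macros */
    #define BIT(n)           (1u << (n))
    #define GENMASK(h, l)    (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))
    #define FIELD_GET(m, v)  (((v) & (m)) / ((m) & ~((m) << 1)))

    #define CPR_FIFO_MODE    GENMASK(23, 16)
    #define CPR_FIFO_SIZE(a) (FIELD_GET(CPR_FIFO_MODE, (a)) * 16)

    int main(void)
    {
        uint32_t cpr = 0x00080000;   /* FIFO mode field = 8 */

        printf("mask=0x%08x fifo=%u bytes\n",
               CPR_FIFO_MODE, CPR_FIFO_SIZE(cpr));
        return 0;   /* prints mask=0x00ff0000 fifo=128 bytes */
    }
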
@@ -82,10 +99,85 @@ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct
p->status |= UPSTAT_AUTOCTS;
serial8250_do_set_termios(p, termios, old);
+
+ /* Filter addresses which have 9th bit set */
+ p->ignore_status_mask |= DW_UART_LSR_ADDR_RCVD;
+ p->read_status_mask |= DW_UART_LSR_ADDR_RCVD;
}
EXPORT_SYMBOL_GPL(dw8250_do_set_termios);
-static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
+/*
+ * Wait until RE is de-asserted for sure. An ongoing receive will keep
+ * RE asserted until the end of the frame. With no BUSY indication
+ * available, the only course of action is to wait for the time it takes
+ * to receive one frame (there might be nothing to receive, but without
+ * BUSY the driver cannot know).
+ */
+static void dw8250_wait_re_deassert(struct uart_port *p)
+{
+ ndelay(p->frame_time);
+}
+
+static void dw8250_update_rar(struct uart_port *p, u32 addr)
+{
+ u32 re_en = dw8250_readl_ext(p, DW_UART_RE_EN);
+
+ /*
+ * RAR shouldn't be changed while receiving. Thus, de-assert RE_EN
+ * if asserted and wait.
+ */
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, 0);
+ dw8250_wait_re_deassert(p);
+ dw8250_writel_ext(p, DW_UART_RAR, addr);
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, re_en);
+}
+
+static void dw8250_rs485_set_addr(struct uart_port *p, struct serial_rs485 *rs485,
+ struct ktermios *termios)
+{
+ u32 lcr = dw8250_readl_ext(p, DW_UART_LCR_EXT);
+
+ if (rs485->flags & SER_RS485_ADDRB) {
+ lcr |= DW_UART_LCR_EXT_DLS_E;
+ if (termios)
+ termios->c_cflag |= ADDRB;
+
+ if (rs485->flags & SER_RS485_ADDR_RECV) {
+ u32 delta = p->rs485.flags ^ rs485->flags;
+
+ /*
+ * rs485 (the parameter) aliases the uart_port's own rs485 only during
+ * init, when delta is not yet applicable.
+ */
+ if (unlikely(&p->rs485 == rs485))
+ delta = rs485->flags;
+
+ if ((delta & SER_RS485_ADDR_RECV) ||
+ (p->rs485.addr_recv != rs485->addr_recv))
+ dw8250_update_rar(p, rs485->addr_recv);
+ lcr |= DW_UART_LCR_EXT_ADDR_MATCH;
+ } else {
+ lcr &= ~DW_UART_LCR_EXT_ADDR_MATCH;
+ }
+ if (rs485->flags & SER_RS485_ADDR_DEST) {
+ /*
+ * Don't skip writes here as another endpoint could
+ * have changed communication line's destination
+ * address in between.
+ */
+ dw8250_writel_ext(p, DW_UART_TAR, rs485->addr_dest);
+ lcr |= DW_UART_LCR_EXT_SEND_ADDR;
+ }
+ } else {
+ lcr = 0;
+ }
+ dw8250_writel_ext(p, DW_UART_LCR_EXT, lcr);
+}
+
+static int dw8250_rs485_config(struct uart_port *p, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
u32 tcr;
@@ -93,25 +185,17 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
tcr &= ~DW_UART_TCR_XFER_MODE;
if (rs485->flags & SER_RS485_ENABLED) {
- /* Clear unsupported flags. */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RX_DURING_TX |
- SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND;
tcr |= DW_UART_TCR_RS485_EN;
- if (rs485->flags & SER_RS485_RX_DURING_TX) {
+ if (rs485->flags & SER_RS485_RX_DURING_TX)
tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE;
- } else {
- /* HW does not support same DE level for tx and rx */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
-
+ else
tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE;
- }
dw8250_writel_ext(p, DW_UART_DE_EN, 1);
dw8250_writel_ext(p, DW_UART_RE_EN, 1);
} else {
- rs485->flags = 0;
+ if (termios)
+ termios->c_cflag &= ~ADDRB;
tcr &= ~DW_UART_TCR_RS485_EN;
}
@@ -127,10 +211,9 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
dw8250_writel_ext(p, DW_UART_TCR, tcr);
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
-
- p->rs485 = *rs485;
+ /* Addressing mode can only be set up after TCR */
+ if (rs485->flags & SER_RS485_ENABLED)
+ dw8250_rs485_set_addr(p, rs485, termios);
return 0;
}
@@ -149,6 +232,12 @@ static bool dw8250_detect_rs485_hw(struct uart_port *p)
return reg;
}
+static const struct serial_rs485 dw8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_ADDRB | SER_RS485_ADDR_RECV |
+ SER_RS485_ADDR_DEST,
+};
+
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
@@ -159,8 +248,11 @@ void dw8250_setup_port(struct uart_port *p)
pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
if (pd->hw_rs485_support) {
p->rs485_config = dw8250_rs485_config;
+ up->lsr_save_mask = LSR_SAVE_FLAGS | DW_UART_LSR_ADDR_RCVD;
+ p->rs485_supported = dw8250_rs485_supported;
} else {
p->rs485_config = serial8250_em485_config;
+ p->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
}
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index e52585064565..f271becfc46c 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -84,8 +84,6 @@ static void serial8250_early_out(struct uart_port *port, int offset, int value)
}
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void serial_putc(struct uart_port *port, unsigned char c)
{
unsigned int status;
@@ -94,7 +92,7 @@ static void serial_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = serial8250_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
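
uart_lsr_tx_empty() replaces the open-coded BOTH_EMPTY test: the transmitter is idle only when both the holding register (THRE) and the shift register (TEMT) report empty. The check it performs, shown standalone:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define UART_LSR_THRE 0x20  /* holding register empty */
    #define UART_LSR_TEMT 0x40  /* shift register empty */
    #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)

    static bool lsr_tx_empty(uint16_t lsr)  /* what uart_lsr_tx_empty() checks */
    {
        return (lsr & BOTH_EMPTY) == BOTH_EMPTY;
    }

    int main(void)
    {
        printf("%d %d\n",
               lsr_tx_empty(UART_LSR_THRE),  /* 0: still shifting out */
               lsr_tx_empty(BOTH_EMPTY));    /* 1: fully drained */
        return 0;
    }
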
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 7292917ac878..314a05e009df 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -112,7 +112,9 @@
struct exar8250;
struct exar8250_platform {
- int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
+ int (*rs485_config)(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
+ const struct serial_rs485 *rs485_supported;
int (*register_gpio)(struct pci_dev *, struct uart_8250_port *);
void (*unregister_gpio)(struct uart_8250_port *);
};
@@ -194,11 +196,11 @@ static int xr17v35x_startup(struct uart_port *port)
static void exar_shutdown(struct uart_port *port)
{
- unsigned char lsr;
bool tx_complete = false;
struct uart_8250_port *up = up_to_u8250p(port);
struct circ_buf *xmit = &port->state->xmit;
int i = 0;
+ u16 lsr;
do {
lsr = serial_in(up, UART_LSR);
@@ -408,7 +410,7 @@ static void xr17v35x_unregister_gpio(struct uart_8250_port *port)
port->port.private_data = NULL;
}
-static int generic_rs485_config(struct uart_port *port,
+static int generic_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -426,18 +428,21 @@ static int generic_rs485_config(struct uart_port *port,
if (is_rs485)
writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 generic_rs485_supported = {
+ .flags = SER_RS485_ENABLED,
+};
+
static const struct exar8250_platform exar8250_default_platform = {
.register_gpio = xr17v35x_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
.rs485_config = generic_rs485_config,
+ .rs485_supported = &generic_rs485_supported,
};
-static int iot2040_rs485_config(struct uart_port *port,
+static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -467,9 +472,13 @@ static int iot2040_rs485_config(struct uart_port *port,
value |= mode;
writeb(value, p + UART_EXAR_MPIOLVL_7_0);
- return generic_rs485_config(port, rs485);
+ return generic_rs485_config(port, termios, rs485);
}
+static const struct serial_rs485 iot2040_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+};
+
static const struct property_entry iot2040_gpio_properties[] = {
PROPERTY_ENTRY_U32("exar,first-pin", 10),
PROPERTY_ENTRY_U32("ngpios", 1),
@@ -498,6 +507,7 @@ static int iot2040_register_gpio(struct pci_dev *pcidev,
static const struct exar8250_platform iot2040_platform = {
.rs485_config = iot2040_rs485_config,
+ .rs485_supported = &iot2040_rs485_supported,
.register_gpio = iot2040_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
};
@@ -540,6 +550,7 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.uartclk = baud * 16;
port->port.rs485_config = platform->rs485_config;
+ port->port.rs485_supported = *(platform->rs485_supported);
/*
* Setup the UART clock for the devices on expansion slot to
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index dba5950b8d0e..65b6b3cbaff6 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -191,7 +191,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
return -ENODEV;
}
-static int fintek_8250_rs485_config(struct uart_port *port,
+static int fintek_8250_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
uint8_t config = 0;
@@ -206,19 +206,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
!(rs485->flags & SER_RS485_RTS_AFTER_SEND))
return -EINVAL;
- memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
- } else {
- memset(rs485, 0, sizeof(*rs485));
- }
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
- /* Only the first port supports delays */
- if (pdata->index) {
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
}
if (rs485->delay_rts_before_send) {
@@ -241,8 +229,6 @@ static int fintek_8250_rs485_config(struct uart_port *port,
sio_write_reg(pdata, RS485, config);
fintek_8250_exit_key(pdata->base_port);
- port->rs485 = *rs485;
-
return 0;
}
@@ -424,6 +410,17 @@ static int probe_setup_port(struct fintek_8250 *pdata,
return -ENODEV;
}
+/* Only the first port supports delays */
+static const struct serial_rs485 fintek_8250_rs485_supported_port0 = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
+static const struct serial_rs485 fintek_8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata = uart->port.private_data;
@@ -435,6 +432,10 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
case CHIP_ID_F81866:
case CHIP_ID_F81865:
uart->port.rs485_config = fintek_8250_rs485_config;
+ if (!pdata->index)
+ uart->port.rs485_supported = fintek_8250_rs485_supported_port0;
+ else
+ uart->port.rs485_supported = fintek_8250_rs485_supported;
break;
default: /* No RS485 Auto direction functional */
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 9c01c531349d..8aad15622a2e 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -25,8 +25,8 @@
int fsl8250_handle_irq(struct uart_port *port)
{
- unsigned char lsr, orig_lsr;
unsigned long flags;
+ u16 lsr, orig_lsr;
unsigned int iir;
struct uart_8250_port *up = up_to_u8250p(port);
@@ -77,7 +77,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if ((lsr & UART_LSR_THRE) && (up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
- up->lsr_saved_flags = orig_lsr;
+ up->lsr_saved_flags |= orig_lsr & UART_LSR_BI;
uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index cff91aa03f29..2b2f5d8d24b9 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -54,7 +54,7 @@ static void early_out(struct uart_port *port, int offset, uint8_t value)
static void ingenic_early_console_putc(struct uart_port *port, unsigned char c)
{
- uint8_t lsr;
+ u16 lsr;
do {
lsr = early_in(port, UART_LSR);
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 570e25d6f37e..6dc85aaba5d0 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -32,7 +32,7 @@ struct lpc18xx_uart_data {
int line;
};
-static int lpc18xx_rs485_config(struct uart_port *port,
+static int lpc18xx_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -40,24 +40,12 @@ static int lpc18xx_rs485_config(struct uart_port *port,
u32 rs485_dly_reg = 0;
unsigned baud_clk;
- if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_NMMEN |
LPC18XX_UART_RS485CTRL_DCTRL;
- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_OINV;
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
- } else {
- rs485->flags |= SER_RS485_RTS_AFTER_SEND;
- }
}
if (rs485->delay_rts_after_send) {
@@ -73,14 +61,9 @@ static int lpc18xx_rs485_config(struct uart_port *port,
/ baud_clk;
}
- /* Delay RTS before send not supported */
- rs485->delay_rts_before_send = 0;
-
serial_out(up, LPC18XX_UART_RS485CTRL, rs485_ctrl_reg);
serial_out(up, LPC18XX_UART_RS485DLY, rs485_dly_reg);
- port->rs485 = *rs485;
-
return 0;
}
@@ -98,6 +81,12 @@ static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value)
writel(value, p->membase + offset);
}
+static const struct serial_rs485 lpc18xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_after_send = 1,
+ /* Delay RTS before send is not supported */
+};
+
static int lpc18xx_serial_probe(struct platform_device *pdev)
{
struct lpc18xx_uart_data *data;
@@ -168,6 +157,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
uart.port.uartclk = clk_get_rate(data->clk_uart);
uart.port.private_data = data;
uart.port.rs485_config = lpc18xx_rs485_config;
+ uart.port.rs485_supported = lpc18xx_rs485_supported;
uart.port.serial_out = lpc18xx_uart_serial_out;
uart.dma = &data->dma;
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 0f5af061e0b4..4ba43bef9933 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -330,7 +330,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
uart.port.irq = pci_irq_vector(pdev, 0);
uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
- uart.port.iotype = UPIO_MEM;
+ uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.uartclk = lpss->board->base_baud * 16;
uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
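
Switching the LPSS ports to UPIO_MEM32 with regshift = 2 places each 8250 register in its own 32-bit word, accessed with readl()/writel(). The address arithmetic in isolation:

    #include <stdio.h>
    #include <stdint.h>

    #define UART_LSR 5          /* standard 8250 register index */

    /* byte offset of a register for a given regshift */
    static uint32_t reg_offset(int reg, int regshift)
    {
        return (uint32_t)reg << regshift;
    }

    int main(void)
    {
        /* UPIO_MEM  (regshift 0): LSR at base + 0x05, byte-wide   */
        /* UPIO_MEM32(regshift 2): LSR at base + 0x14, 32-bit wide */
        printf("0x%02x 0x%02x\n", reg_offset(UART_LSR, 0),
                                  reg_offset(UART_LSR, 2));
        return 0;
    }
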
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 5a699a1aa79c..1b461fba15a3 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -165,6 +165,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->dev = &ofdev->dev;
port->rs485_config = serial8250_em485_config;
+ port->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index ac8bfa042391..0dcecbbc3967 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1115,8 +1115,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
return omap_8250_rx_dma(up);
}
-static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
- u8 iir, unsigned char status)
+static u16 omap_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, u16 status)
{
if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
(iir & UART_IIR_RDI)) {
@@ -1130,7 +1129,7 @@ static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
}
static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
- unsigned char status)
+ u16 status)
{
/*
* Queue a new transfer if FIFO has data.
@@ -1164,7 +1163,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
- unsigned char status;
+ u16 status;
u8 iir;
serial8250_rpm_get(up);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index a17619db7939..6f66dc2ebacc 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1553,7 +1553,7 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
#define FINTEK_RTS_INVERT BIT(5)
/* We should do proper H/W transceiver setting before change to RS485 mode */
-static int pci_fintek_rs485_config(struct uart_port *port,
+static int pci_fintek_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct pci_dev *pci_dev = to_pci_dev(port->dev);
@@ -1562,16 +1562,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
- if (!rs485)
- rs485 = &port->rs485;
- else if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- /* F81504/508/512 not support RTS delay before or after send */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable RTS H/W control mode */
setting |= FINTEK_RTS_CONTROL_BY_HW;
@@ -1583,9 +1573,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
/* RTS driving low on TX */
setting |= FINTEK_RTS_INVERT;
}
-
- rs485->delay_rts_after_send = 0;
- rs485->delay_rts_before_send = 0;
} else {
/* Disable RTS H/W control mode */
setting &= ~(FINTEK_RTS_CONTROL_BY_HW | FINTEK_RTS_INVERT);
@@ -1593,12 +1580,14 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_write_config_byte(pci_dev, 0x40 + 8 * *index + 7, setting);
- if (rs485 != &port->rs485)
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 pci_fintek_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
+ /* F81504/508/512 do not support RTS delay before or after send */
+};
+
static int pci_fintek_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1618,6 +1607,7 @@ static int pci_fintek_setup(struct serial_private *priv,
port->port.iotype = UPIO_PORT;
port->port.iobase = iobase;
port->port.rs485_config = pci_fintek_rs485_config;
+ port->port.rs485_supported = pci_fintek_rs485_supported;
data = devm_kzalloc(&pdev->dev, sizeof(u8), GFP_KERNEL);
if (!data)
@@ -1689,7 +1679,7 @@ static int pci_fintek_init(struct pci_dev *dev)
* pciserial_resume_ports()
*/
port = serial8250_get_port(priv->line[i]);
- pci_fintek_rs485_config(&port->port, NULL);
+ uart_rs485_config(&port->port);
} else {
/* First init without port data
* force init to RS232 Mode
@@ -5077,6 +5067,115 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
+ * Brainboxes PX-101
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4005,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4019,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-235/246
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4004,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4016,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-203/PX-257
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4006,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-260/PX-701
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400A,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-310
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-313
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400C,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-320/324/PX-376/PX-387
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400B,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-335/346
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400F,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-368
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4010,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-420
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4000,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4011,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-803
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-846
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4008,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4017,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+
+ /*
* Perle PCI-RAS cards
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
index 95ff10f25d58..b8d5b7714a9d 100644
--- a/drivers/tty/serial/8250/8250_pericom.c
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -73,7 +73,7 @@ static void pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
struct uart_8250_port *up = up_to_u8250p(port);
int lcr = serial_port_in(port, UART_LCR);
- serial_port_out(port, UART_LCR, lcr | 0x80);
+ serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
serial_dl_write(up, divisor);
serial_port_out(port, 2, 16 - scr);
serial_port_out(port, UART_LCR, lcr);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 3c36a06a20b0..39b35a61958c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -50,8 +50,6 @@
#define DEBUG_AUTOCONF(fmt...) do { } while (0)
#endif
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Here we define the default xmit fifo size used for each type of UART.
*/
@@ -336,27 +334,29 @@ static void default_serial_dl_write(struct uart_8250_port *up, int value)
#ifdef CONFIG_SERIAL_8250_RT288X
+#define UART_REG_UNMAPPED -1
+
/* Au1x00/RT288x UART hardware has a weird register layout */
static const s8 au_io_in_map[8] = {
- 0, /* UART_RX */
- 2, /* UART_IER */
- 3, /* UART_IIR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- 7, /* UART_LSR */
- 8, /* UART_MSR */
- -1, /* UART_SCR (unmapped) */
+ [UART_RX] = 0,
+ [UART_IER] = 2,
+ [UART_IIR] = 3,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = 7,
+ [UART_MSR] = 8,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
static const s8 au_io_out_map[8] = {
- 1, /* UART_TX */
- 2, /* UART_IER */
- 4, /* UART_FCR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- -1, /* UART_LSR (unmapped) */
- -1, /* UART_MSR (unmapped) */
- -1, /* UART_SCR (unmapped) */
+ [UART_TX] = 1,
+ [UART_IER] = 2,
+ [UART_FCR] = 4,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = UART_REG_UNMAPPED,
+ [UART_MSR] = UART_REG_UNMAPPED,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
unsigned int au_serial_in(struct uart_port *p, int offset)
@@ -364,7 +364,7 @@ unsigned int au_serial_in(struct uart_port *p, int offset)
if (offset >= ARRAY_SIZE(au_io_in_map))
return UINT_MAX;
offset = au_io_in_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return UINT_MAX;
return __raw_readl(p->membase + (offset << p->regshift));
}
@@ -374,7 +374,7 @@ void au_serial_out(struct uart_port *p, int offset, int value)
if (offset >= ARRAY_SIZE(au_io_out_map))
return;
offset = au_io_out_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return;
__raw_writel(value, p->membase + (offset << p->regshift));
}
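
The table rewrite above keys each entry by its standard register index with designated initializers and replaces the bare -1 with an explicit UART_REG_UNMAPPED sentinel. The same idiom reduced to a standalone sketch (abbreviated register names):

    #include <stdio.h>

    #define REG_UNMAPPED -1
    enum { REG_RX, REG_IER, REG_IIR, REG_SCR = 7, NREGS };

    /* index = standard register, value = quirky hardware offset */
    static const signed char io_in_map[NREGS] = {
        [REG_RX]  = 0,
        [REG_IER] = 2,
        [REG_IIR] = 3,
        [REG_SCR] = REG_UNMAPPED,   /* gaps default to 0 -- be explicit */
    };

    int main(void)
    {
        int off = io_in_map[REG_SCR];

        if (off == REG_UNMAPPED)
            puts("REG_SCR not mapped on this hardware");
        return 0;
    }
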
@@ -647,6 +647,14 @@ void serial8250_em485_destroy(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
+struct serial_rs485 serial8250_em485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_TERMINATE_BUS | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+EXPORT_SYMBOL_GPL(serial8250_em485_supported);
+
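
With rs485_supported, each driver declares its capabilities once and the serial core (uart_rs485_config() and the TIOCSRS485 ioctl path) sanitizes requests against them, instead of every driver clamping flags by hand. Requesting RS485 from userspace is unchanged; /dev/ttyS0 and a capable port are assumed:

    #include <sys/ioctl.h>
    #include <linux/serial.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct serial_rs485 rs485;
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY); /* assumed port */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        memset(&rs485, 0, sizeof(rs485));
        rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
        rs485.delay_rts_after_send = 1;  /* ms; honored only if supported */

        if (ioctl(fd, TIOCSRS485, &rs485))
            perror("TIOCSRS485");        /* e.g. unsupported flags */
        if (!ioctl(fd, TIOCGRS485, &rs485))
            printf("effective flags: 0x%x\n", rs485.flags);
        return 0;
    }
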
/**
* serial8250_em485_config() - generic ->rs485_config() callback
* @port: uart port
@@ -656,7 +664,8 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
* if the uart is incapable of driving RTS as a Transmit Enable signal in
* hardware, relying on software emulation instead.
*/
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -667,29 +676,12 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
}
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- memset(rs485->padding, 0, sizeof(rs485->padding));
- port->rs485 = *rs485;
-
- gpiod_set_value(port->rs485_term_gpio,
- rs485->flags & SER_RS485_TERMINATE_BUS);
-
/*
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
*/
- if (rs485->flags & SER_RS485_ENABLED) {
- int ret = serial8250_em485_init(up);
-
- if (ret) {
- rs485->flags &= ~SER_RS485_ENABLED;
- port->rs485.flags &= ~SER_RS485_ENABLED;
- }
- return ret;
- }
+ if (rs485->flags & SER_RS485_ENABLED)
+ return serial8250_em485_init(up);
serial8250_em485_destroy(up);
return 0;
@@ -849,7 +841,7 @@ static int size_fifo(struct uart_8250_port *up)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(up);
serial_dl_write(up, 0x0001);
- serial_out(up, UART_LCR, 0x03);
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
for (count = 0; count < 256; count++)
serial_out(up, UART_TX, count);
mdelay(20);/* FIXME - schedule_timeout */
@@ -1503,18 +1495,12 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
}
}
-static inline void __do_stop_tx(struct uart_8250_port *p)
-{
- if (serial8250_clear_THRI(p))
- serial8250_rpm_put_tx(p);
-}
-
static inline void __stop_tx(struct uart_8250_port *p)
{
struct uart_8250_em485 *em485 = p->em485;
if (em485) {
- unsigned char lsr = serial_in(p, UART_LSR);
+ u16 lsr = serial_lsr_in(p);
u64 stop_delay = 0;
p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
@@ -1522,7 +1508,7 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (!(lsr & UART_LSR_THRE))
return;
/*
- * To provide required timeing and allow FIFO transfer,
+ * To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
* shift register are empty. The device driver should either
* enable interrupt on TEMT or set UART_CAP_NOTEMT that will
@@ -1544,7 +1530,9 @@ static inline void __stop_tx(struct uart_8250_port *p)
__stop_tx_rs485(p, stop_delay);
}
- __do_stop_tx(p);
+
+ if (serial8250_clear_THRI(p))
+ serial8250_rpm_put_tx(p);
}
static void serial8250_stop_tx(struct uart_port *port)
@@ -1573,10 +1561,8 @@ static inline void __start_tx(struct uart_port *port)
if (serial8250_set_THRI(up)) {
if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
+ u16 lsr = serial_lsr_in(up);
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
if (lsr & UART_LSR_THRE)
serial8250_tx_chars(up);
}
@@ -1616,7 +1602,8 @@ void serial8250_em485_start_tx(struct uart_8250_port *up)
}
EXPORT_SYMBOL_GPL(serial8250_em485_start_tx);
-static inline void start_tx_rs485(struct uart_port *port)
+/* Returns false if start_tx_timer was set up to defer TX start */
+static bool start_tx_rs485(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
@@ -1644,11 +1631,11 @@ static inline void start_tx_rs485(struct uart_port *port)
em485->active_timer = &em485->start_tx_timer;
start_hrtimer_ms(&em485->start_tx_timer,
up->port.rs485.delay_rts_before_send);
- return;
+ return false;
}
}
- __start_tx(port);
+ return true;
}
static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
@@ -1678,14 +1665,12 @@ static void serial8250_start_tx(struct uart_port *port)
serial8250_rpm_get_tx(up);
- if (em485 &&
- em485->active_timer == &em485->start_tx_timer)
- return;
-
- if (em485)
- start_tx_rs485(port);
- else
- __start_tx(port);
+ if (em485) {
+ if ((em485->active_timer == &em485->start_tx_timer) ||
+ !start_tx_rs485(port))
+ return;
+ }
+ __start_tx(port);
}
static void serial8250_throttle(struct uart_port *port)
@@ -1729,7 +1714,7 @@ static void serial8250_enable_ms(struct uart_port *port)
serial8250_rpm_put(up);
}
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
unsigned char ch;
@@ -1792,11 +1777,13 @@ void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
EXPORT_SYMBOL_GPL(serial8250_read_char);
/*
- * serial8250_rx_chars: processes according to the passed in LSR
- * value, and returns the remaining LSR bits not handled
- * by this Rx routine.
+ * serial8250_rx_chars - Read characters. The first LSR value must be passed in.
+ *
+ * Returns LSR bits. The caller should rely only on non-Rx related LSR bits
+ * (such as THRE) because the LSR value might come from an already consumed
+ * character.
*/
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
int max_count = 256;
@@ -1852,7 +1839,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
if (uart_circ_empty(xmit))
break;
if ((up->capabilities & UART_CAP_HFIFO) &&
- (serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ !uart_lsr_tx_empty(serial_in(up, UART_LSR)))
break;
/* The BCM2835 MINI UART THRE bit is really a not-full bit. */
if ((up->capabilities & UART_CAP_MINI) &&
@@ -1916,17 +1903,17 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
*/
int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
- unsigned char status;
struct uart_8250_port *up = up_to_u8250p(port);
bool skip_rx = false;
unsigned long flags;
+ u16 status;
if (iir & UART_IIR_NO_INT)
return 0;
spin_lock_irqsave(&port->lock, flags);
- status = serial_port_in(port, UART_LSR);
+ status = serial_lsr_in(up);
/*
* If port is stopped and there are no error conditions in the
@@ -2002,18 +1989,17 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned int lsr;
+ u16 lsr;
serial8250_rpm_get(up);
spin_lock_irqsave(&port->lock, flags);
- lsr = serial_port_in(port, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
spin_unlock_irqrestore(&port->lock, flags);
serial8250_rpm_put(up);
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
}
unsigned int serial8250_do_get_mctrl(struct uart_port *port)
@@ -2084,9 +2070,7 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
/* Wait up to 10ms for the character(s) to be sent. */
for (;;) {
- status = serial_in(up, UART_LSR);
-
- up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+ status = serial_lsr_in(up);
if ((status & bits) == bits)
break;
@@ -2128,8 +2112,8 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
static int serial8250_get_poll_char(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
- unsigned char lsr;
int status;
+ u16 lsr;
serial8250_rpm_get(up);
@@ -2163,7 +2147,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
else
serial_port_out(port, UART_IER, 0);
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
* Send the character out.
*/
@@ -2173,7 +2157,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
serial_port_out(port, UART_IER, ier);
serial8250_rpm_put(up);
}
@@ -2184,8 +2168,9 @@ int serial8250_do_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned char lsr, iir;
+ unsigned char iir;
int retval;
+ u16 lsr;
if (!port->fifosize)
port->fifosize = uart_config[port->type].fifo_size;
@@ -2810,7 +2795,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask |= UART_LSR_BI;
/*
- * Characteres to ignore
+ * Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
@@ -3203,7 +3188,7 @@ static void serial8250_config_port(struct uart_port *port, int flags)
autoconfig(up);
if (port->rs485.flags & SER_RS485_ENABLED)
- port->rs485_config(port, &port->rs485);
+ uart_rs485_config(port);
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
@@ -3445,7 +3430,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
if (em485) {
mdelay(port->rs485.delay_rts_after_send);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index fdb6c4188695..d0b49e15fbf5 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -254,6 +254,7 @@ config SERIAL_8250_ASPEED_VUART
depends on SERIAL_8250
depends on OF
depends on REGMAP && MFD_SYSCON
+ depends on ARCH_ASPEED || COMPILE_TEST
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 7172cd1792df..877173907c53 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -324,6 +324,7 @@ config SERIAL_MAX310X
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
+ select REGMAP_I2C if I2C
help
This selects support for an advanced UART from Maxim (Dallas).
Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.
@@ -889,23 +890,6 @@ config SERIAL_TXX9_STDSERIAL
bool "TX39XX/49XX SIO act as standard serial"
depends on !SERIAL_8250 && SERIAL_TXX9
-config SERIAL_VR41XX
- tristate "NEC VR4100 series Serial Interface Unit support"
- depends on CPU_VR41XX
- select SERIAL_CORE
- help
- If you have a NEC VR4100 series processor and you want to use
- Serial Interface Unit(SIU) or Debug Serial Interface Unit(DSIU)
- (not include VR4111/VR4121 DSIU), say Y. Otherwise, say N.
-
-config SERIAL_VR41XX_CONSOLE
- bool "Enable NEC VR4100 series Serial Interface Unit console"
- depends on SERIAL_VR41XX=y
- select SERIAL_CORE_CONSOLE
- help
- If you have a NEC VR4100 series processor and you want to use
- a console on a serial port, say Y. Otherwise, say N.
-
config SERIAL_JSM
tristate "Digi International NEO and Classic PCI Support"
depends on PCI
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 61cc8de95571..238a9557b487 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
-obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 16a21422ddce..15f0e4d88c5a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2214,7 +2214,7 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
-static int pl011_rs485_config(struct uart_port *port,
+static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_amba_port *uap =
@@ -2700,17 +2700,12 @@ static int pl011_find_free_port(void)
static int pl011_get_rs485_mode(struct uart_amba_port *uap)
{
struct uart_port *port = &uap->port;
- struct serial_rs485 *rs485 = &port->rs485;
int ret;
ret = uart_get_rs485_mode(port);
if (ret)
return ret;
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
return 0;
}
@@ -2770,6 +2765,13 @@ static int pl011_register_port(struct uart_amba_port *uap)
return ret;
}
+static const struct serial_rs485 pl011_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -2796,6 +2798,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.irq = dev->irq[0];
uap->port.ops = &amba_pl011_pops;
uap->port.rs485_config = pl011_rs485_config;
+ uap->port.rs485_supported = pl011_rs485_supported;
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
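Throughout this series, drivers stop clamping delays and stripping unsupported flags in their rs485_config() callbacks (see the pl011, ar933x, fsl_lpuart and max310x hunks) and instead advertise capabilities in a serial_rs485 template; a nonzero delay_rts_before_send/delay_rts_after_send there acts as a boolean "delays supported" marker. The serial core's uart_rs485_config(), seen replacing direct rs485_config() calls below, is assumed to sanitize requests roughly as follows and to store the result to port->rs485, which would be why drivers such as mcf stop assigning it themselves (helper shape and RS485_MAX_RTS_DELAY are assumptions about serial_core, not part of this diff):

static void uart_sanitize_serial_rs485(struct uart_port *port,
				       struct serial_rs485 *rs485)
{
	/* Drop any flag the driver did not declare as supported */
	rs485->flags &= port->rs485_supported.flags;

	if (!port->rs485_supported.delay_rts_before_send)
		rs485->delay_rts_before_send = 0;
	else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY)
		rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;

	if (!port->rs485_supported.delay_rts_after_send)
		rs485->delay_rts_after_send = 0;
	else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY)
		rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
}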
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 6269dbf93546..32caeac12985 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -580,18 +580,9 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
-static int ar933x_config_rs485(struct uart_port *port,
+static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
- struct ar933x_uart_port *up =
- container_of(port, struct ar933x_uart_port, port);
-
- if ((rs485conf->flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(port->dev, "RS485 needs rts-gpio\n");
- return 1;
- }
- port->rs485 = *rs485conf;
return 0;
}
@@ -702,6 +693,11 @@ static struct uart_driver ar933x_uart_driver = {
.cons = NULL, /* filled in runtime */
};
+static const struct serial_rs485 ar933x_no_rs485 = {};
+static const struct serial_rs485 ar933x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static int ar933x_uart_probe(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
@@ -773,6 +769,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port->fifosize = AR933X_UART_FIFO_SIZE;
port->ops = &ar933x_uart_ops;
port->rs485_config = ar933x_config_rs485;
+ port->rs485_supported = ar933x_rs485_supported;
baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1);
up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD);
@@ -792,10 +789,12 @@ static int ar933x_uart_probe(struct platform_device *pdev)
up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
- if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
- port->rs485.flags &= ~SER_RS485_ENABLED;
+ if (!up->rts_gpiod) {
+ port->rs485_supported = ar933x_no_rs485;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dd1c7e4bd1c9..30ba9eef7b39 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,7 +166,6 @@ struct atmel_uart_port {
unsigned int fidi_min;
unsigned int fidi_max;
-#ifdef CONFIG_PM
struct {
u32 cr;
u32 mr;
@@ -177,7 +176,6 @@ struct atmel_uart_port {
u32 fmr;
u32 fimr;
} cache;
-#endif
int (*prepare_rx)(struct uart_port *port);
int (*prepare_tx)(struct uart_port *port);
@@ -285,7 +283,7 @@ static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
}
/* Enable or disable the rs485 support */
-static int atmel_config_rs485(struct uart_port *port,
+static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -2473,6 +2471,12 @@ static const struct uart_ops atmel_pops = {
#endif
};
+static const struct serial_rs485 atmel_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/*
* Configure the port from the platform device resource info.
*/
@@ -2494,6 +2498,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->mapbase = mpdev->resource[0].start;
port->irq = platform_get_irq(mpdev, 0);
port->rs485_config = atmel_config_rs485;
+ port->rs485_supported = atmel_rs485_supported;
port->iso7816_config = atmel_config_iso7816;
port->membase = NULL;
@@ -2503,24 +2508,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
if (ret)
return ret;
- /* for console, the clock could already be configured */
- if (!atmel_port->clk) {
- atmel_port->clk = clk_get(&mpdev->dev, "usart");
- if (IS_ERR(atmel_port->clk)) {
- ret = PTR_ERR(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- ret = clk_prepare_enable(atmel_port->clk);
- if (ret) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- port->uartclk = clk_get_rate(atmel_port->clk);
- clk_disable_unprepare(atmel_port->clk);
- /* only enable clock when USART is in use */
- }
+ port->uartclk = clk_get_rate(atmel_port->clk);
/*
* Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
@@ -2629,7 +2617,6 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
static int __init atmel_console_setup(struct console *co, char *options)
{
- int ret;
struct uart_port *port = &atmel_ports[co->index].uart;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int baud = 115200;
@@ -2642,10 +2629,6 @@ static int __init atmel_console_setup(struct console *co, char *options)
return -ENODEV;
}
- ret = clk_prepare_enable(atmel_ports[co->index].clk);
- if (ret)
- return ret;
-
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
@@ -2711,7 +2694,6 @@ static struct uart_driver atmel_uart = {
.cons = ATMEL_CONSOLE_DEVICE,
};
-#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
@@ -2721,10 +2703,9 @@ static bool atmel_serial_clk_will_stop(void)
#endif
}
-static int atmel_serial_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused atmel_serial_suspend(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (uart_console(port) && console_suspend_enabled) {
@@ -2749,14 +2730,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
}
/* we can not wake up if we're running on slow clock */
- atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
+ atmel_port->may_wakeup = device_may_wakeup(dev);
if (atmel_serial_clk_will_stop()) {
unsigned long flags;
spin_lock_irqsave(&atmel_port->lock_suspended, flags);
atmel_port->suspended = true;
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
- device_set_wakeup_enable(&pdev->dev, 0);
+ device_set_wakeup_enable(dev, 0);
}
uart_suspend_port(&atmel_uart, port);
@@ -2764,9 +2745,9 @@ static int atmel_serial_suspend(struct platform_device *pdev,
return 0;
}
-static int atmel_serial_resume(struct platform_device *pdev)
+static int __maybe_unused atmel_serial_resume(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
@@ -2801,14 +2782,10 @@ static int atmel_serial_resume(struct platform_device *pdev)
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
uart_resume_port(&atmel_uart, port);
- device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
+ device_set_wakeup_enable(dev, atmel_port->may_wakeup);
return 0;
}
-#else
-#define atmel_serial_suspend NULL
-#define atmel_serial_resume NULL
-#endif
static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
@@ -2897,14 +2874,23 @@ static int atmel_serial_probe(struct platform_device *pdev)
atomic_set(&atmel_port->tasklet_shutdown, 0);
spin_lock_init(&atmel_port->lock_suspended);
+ atmel_port->clk = devm_clk_get(&pdev->dev, "usart");
+ if (IS_ERR(atmel_port->clk)) {
+ ret = PTR_ERR(atmel_port->clk);
+ goto err;
+ }
+ ret = clk_prepare_enable(atmel_port->clk);
+ if (ret)
+ goto err;
+
ret = atmel_init_port(atmel_port, pdev);
if (ret)
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
if (IS_ERR(atmel_port->gpios)) {
ret = PTR_ERR(atmel_port->gpios);
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
}
if (!atmel_use_pdc_rx(&atmel_port->uart)) {
@@ -2913,7 +2899,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
sizeof(struct atmel_uart_char),
GFP_KERNEL);
if (!data)
- goto err_alloc_ring;
+ goto err_clk_disable_unprepare;
atmel_port->rx_ring.buf = data;
}
@@ -2923,26 +2909,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
if (ret)
goto err_add_port;
-#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
- if (uart_console(&atmel_port->uart)
- && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
- /*
- * The serial core enabled the clock for us, so undo
- * the clk_prepare_enable() in atmel_console_setup()
- */
- clk_disable_unprepare(atmel_port->clk);
- }
-#endif
-
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, atmel_port);
- /*
- * The peripheral clock has been disabled by atmel_init_port():
- * enable it before accessing I/O registers
- */
- clk_prepare_enable(atmel_port->clk);
-
if (rs485_enabled) {
atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
ATMEL_US_USMODE_NORMAL);
@@ -2966,12 +2935,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
err_add_port:
kfree(atmel_port->rx_ring.buf);
atmel_port->rx_ring.buf = NULL;
-err_alloc_ring:
- if (!uart_console(&atmel_port->uart)) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- }
-err_clear_bit:
+err_clk_disable_unprepare:
+ clk_disable_unprepare(atmel_port->clk);
clear_bit(atmel_port->uart.line, atmel_ports_in_use);
err:
return ret;
@@ -3005,21 +2970,21 @@ static int atmel_serial_remove(struct platform_device *pdev)
clear_bit(port->line, atmel_ports_in_use);
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
pdev->dev.of_node = NULL;
return ret;
}
+static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
+ atmel_serial_resume);
+
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
.remove = atmel_serial_remove,
- .suspend = atmel_serial_suspend,
- .resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
+ .pm = pm_ptr(&atmel_serial_pm_ops),
},
};
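The atmel hunks above convert legacy platform_driver suspend/resume callbacks to dev_pm_ops: __maybe_unused keeps the compiler quiet when CONFIG_PM is disabled, and pm_ptr() makes the ops pointer itself vanish, which is what lets the #ifdef CONFIG_PM scaffolding and the NULL fallbacks go away. A sketch of the assumed pm_ptr() definition from linux/pm.h:

#define pm_ptr(_ptr)	PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
/* roughly: IS_ENABLED(CONFIG_PM) ? (_ptr) : NULL, so the suspend/resume
 * handlers become dead code the compiler can discard when !CONFIG_PM */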
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 57c70851f22a..88d08ba1ca83 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -253,6 +253,9 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
bool big_endian;
u64 addr;
+ if (early_con.flags & CON_ENABLED)
+ return -EALREADY;
+
spin_lock_init(&port->lock);
port->iotype = UPIO_MEM;
addr = of_flat_dt_translate_address(node);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 0d6e62f6bb07..f6c33cd228c8 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -274,6 +274,8 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
+ bool is_cs7; /* Set to true when character size is 7 */
+ /* and the parity is enabled */
};
struct lpuart_soc_data {
@@ -990,12 +992,12 @@ static void lpuart32_rxint(struct lpuart_port *sport)
if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) {
if (sr & UARTSTAT_PE) {
+ sport->port.icount.parity++;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
sport->port.icount.brk++;
else
- sport->port.icount.parity++;
- } else if (sr & UARTSTAT_FE) {
- sport->port.icount.frame++;
+ sport->port.icount.frame++;
}
if (sr & UARTSTAT_OR)
@@ -1010,18 +1012,21 @@ static void lpuart32_rxint(struct lpuart_port *sport)
sr &= sport->port.read_status_mask;
if (sr & UARTSTAT_PE) {
+ flg = TTY_PARITY;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
flg = TTY_BREAK;
else
- flg = TTY_PARITY;
- } else if (sr & UARTSTAT_FE) {
- flg = TTY_FRAME;
+ flg = TTY_FRAME;
}
if (sr & UARTSTAT_OR)
flg = TTY_OVERRUN;
}
+ if (sport->is_cs7)
+ rx &= 0x7F;
+
if (tty_insert_flip_char(port, rx, flg) == 0)
sport->port.icount.buf_overrun++;
}
@@ -1107,6 +1112,17 @@ static void lpuart_handle_sysrq(struct lpuart_port *sport)
}
}
+static int lpuart_tty_insert_flip_string(struct tty_port *port,
+ unsigned char *chars, size_t size, bool is_cs7)
+{
+ int i;
+
+ if (is_cs7)
+ for (i = 0; i < size; i++)
+ chars[i] &= 0x7F;
+ return tty_insert_flip_string(port, chars, size);
+}
+
static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
struct tty_port *port = &sport->port.state->port;
@@ -1217,7 +1233,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
if (ring->head < ring->tail) {
count = sport->rx_sgl.length - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
ring->tail = 0;
@@ -1227,7 +1244,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
/* Finally we read data from tail to head */
if (ring->tail < ring->head) {
count = ring->head - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
/* Wrap ring->head if needed */
@@ -1355,7 +1373,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
sport->dma_rx_cookie = -EINVAL;
}
-static int lpuart_config_rs485(struct uart_port *port,
+static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1365,11 +1383,6 @@ static int lpuart_config_rs485(struct uart_port *port,
~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
writeb(modem, sport->port.membase + UARTMODEM);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -1390,7 +1403,7 @@ static int lpuart_config_rs485(struct uart_port *port,
return 0;
}
-static int lpuart32_config_rs485(struct uart_port *port,
+static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1400,11 +1413,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
& ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
lpuart32_write(&sport->port, modem, UARTMODIR);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -2076,6 +2084,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
bd = lpuart32_read(&sport->port, UARTBAUD);
modem = lpuart32_read(&sport->port, UARTMODIR);
+ sport->is_cs7 = false;
/*
* only support CS8 and CS7, and for CS7 must enable PE.
* supported mode:
@@ -2194,6 +2203,9 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, ctrl, UARTCTRL);
/* restore control register */
+ if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
+ sport->is_cs7 = true;
+
if (old && sport->lpuart_dma_rx_use) {
if (!lpuart_start_rx_dma(sport))
rx_dma_timer_init(sport);
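The is_cs7 test above flags the 7-data-bits-plus-parity frame, where the hardware leaves the parity bit in bit 7 of every received byte; the rx &= 0x7F in lpuart32_rxint() and lpuart_tty_insert_flip_string() strip it on the IRQ and DMA paths respectively. A contrived illustration with made-up values:

/* 'A' (0x41) has two 1-bits, so with CS7 plus odd parity the parity
 * bit is set and the FIFO yields 0xC1; masking recovers the character. */
u8 fifo_byte = 0xc1;
u8 ch = fifo_byte & 0x7f;	/* 0x41 == 'A' */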
@@ -2621,6 +2633,11 @@ static struct uart_driver lpuart_reg = {
.cons = LPUART_CONSOLE,
};
+static const struct serial_rs485 lpuart_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ /* delay_rts_* and RX_DURING_TX are not supported */
+};
+
static int lpuart_probe(struct platform_device *pdev)
{
const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
@@ -2660,6 +2677,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config = lpuart32_config_rs485;
else
sport->port.rs485_config = lpuart_config_rs485;
+ sport->port.rs485_supported = lpuart_rs485_supported;
sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->ipg_clk)) {
@@ -2717,14 +2735,7 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_get_rs485;
- if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
- dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
-
- if (sport->port.rs485.delay_rts_before_send ||
- sport->port.rs485.delay_rts_after_send)
- dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
-
- sport->port.rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
DRIVER_NAME, sport);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 30edb35a6a15..522445a8f666 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1907,16 +1907,12 @@ static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
#endif
/* called with port.lock taken and irqs off or from .probe without locking */
-static int imx_uart_rs485_config(struct uart_port *port,
+static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr2;
- /* RTS is required to control the transmitter */
- if (!sport->have_rtscts && !sport->have_rtsgpio)
- rs485conf->flags &= ~SER_RS485_ENABLED;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
/* Enable receiver if low-active RTS signal is requested */
if (sport->have_rtscts && !sport->have_rtsgpio &&
@@ -2200,6 +2196,14 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
return HRTIMER_NORESTART;
}
+static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
+static const struct serial_rs485 imx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/* Default RX DMA buffer configuration */
#define RX_DMA_PERIODS 16
#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4)
@@ -2279,6 +2283,11 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
sport->port.ops = &imx_uart_pops;
sport->port.rs485_config = imx_uart_rs485_config;
+ /* RTS is required to control the RS485 transmitter */
+ if (sport->have_rtscts || sport->have_rtsgpio)
+ sport->port.rs485_supported = imx_rs485_supported;
+ else
+ sport->port.rs485_supported = imx_no_rs485;
sport->port.flags = UPF_BOOT_AUTOCONF;
timer_setup(&sport->timer, imx_uart_timeout, 0);
@@ -2338,7 +2347,7 @@ static int imx_uart_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
- imx_uart_rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 79b7db8580e0..7aa37be3216a 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -342,7 +342,7 @@ static int param_set_kgdboc_var(const char *kmessage,
/*
* Configure with the new params as long as init already ran.
* Note that we can get called before init if someone loads us
- * with "modprobe kgdboc kgdboc=..." or if they happen to use the
+ * with "modprobe kgdboc kgdboc=..." or if they happen to use
* the odd syntax of "kgdboc.kgdboc=..." on the kernel command.
*/
if (configured >= 0)
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index a0b6ea52d133..ab10ca4a45b5 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
@@ -72,7 +73,8 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
-#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
@@ -245,7 +247,17 @@
#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
#define MAX14830_REV_ID (0xb0)
+struct max310x_if_cfg {
+ int (*extended_reg_enable)(struct device *dev, bool enable);
+
+ unsigned int rev_id_reg;
+};
+
struct max310x_devtype {
+ struct {
+ unsigned short min;
+ unsigned short max;
+ } slave_addr;
char name[9];
int nr;
u8 mode1;
@@ -258,9 +270,8 @@ struct max310x_one {
struct work_struct tx_work;
struct work_struct md_work;
struct work_struct rs_work;
+ struct regmap *regmap;
- u8 wr_header;
- u8 rd_header;
u8 rx_buf[MAX310X_FIFO_SIZE];
};
#define to_max310x_port(_port) \
@@ -268,6 +279,7 @@ struct max310x_one {
struct max310x_port {
const struct max310x_devtype *devtype;
+ const struct max310x_if_cfg *if_cfg;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
@@ -289,26 +301,26 @@ static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
unsigned int val = 0;
- regmap_read(s->regmap, port->iobase + reg, &val);
+ regmap_read(one->regmap, reg, &val);
return val;
}
static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_write(s->regmap, port->iobase + reg, val);
+ regmap_write(one->regmap, reg, val);
}
static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_update_bits(s->regmap, port->iobase + reg, mask, val);
+ regmap_update_bits(one->regmap, reg, mask, val);
}
static int max3107_detect(struct device *dev)
@@ -357,13 +369,12 @@ static int max3109_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -388,13 +399,12 @@ static int max14830_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -419,6 +429,10 @@ static const struct max310x_devtype max3107_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x2c,
+ .max = 0x2f,
+ },
};
static const struct max310x_devtype max3108_devtype = {
@@ -427,6 +441,10 @@ static const struct max310x_devtype max3108_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max3109_devtype = {
@@ -435,6 +453,10 @@ static const struct max310x_devtype max3109_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max14830_devtype = {
@@ -443,11 +465,15 @@ static const struct max310x_devtype max14830_devtype = {
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -464,7 +490,7 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
@@ -486,7 +512,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -624,31 +650,15 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->wr_header,
- .len = sizeof(one->wr_header),
- }, {
- .tx_buf = txbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}
static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->rd_header,
- .len = sizeof(one->rd_header),
- }, {
- .rx_buf = rxbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
@@ -1026,7 +1036,7 @@ static void max310x_rs_proc(struct work_struct *ws)
MAX310X_MODE2_ECHOSUPR_BIT, mode2);
}
-static int max310x_rs485_config(struct uart_port *port,
+static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct max310x_one *one = to_max310x_port(port);
@@ -1035,8 +1045,6 @@ static int max310x_rs485_config(struct uart_port *port,
(rs485->delay_rts_after_send > 0x0f))
return -ERANGE;
- rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX |
- SER_RS485_ENABLED;
port->rs485 = *rs485;
schedule_work(&one->rs_work);
@@ -1249,16 +1257,24 @@ static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
#endif
+static const struct serial_rs485 max310x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
- struct regmap *regmap, int irq)
+ const struct max310x_if_cfg *if_cfg,
+ struct regmap *regmaps[], int irq)
{
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
u32 uartclk = 0;
bool xtal;
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ for (i = 0; i < devtype->nr; i++)
+ if (IS_ERR(regmaps[i]))
+ return PTR_ERR(regmaps[i]);
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
@@ -1305,8 +1321,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
}
- s->regmap = regmap;
+ s->regmap = regmaps[0];
s->devtype = devtype;
+ s->if_cfg = if_cfg;
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
@@ -1315,22 +1332,18 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
- unsigned int offs = i << 5;
-
/* Reset port */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs,
+ regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
/* Clear port reset */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0);
+ regmap_write(regmaps[i], MAX310X_MODE2_REG, 0);
/* Wait for port startup */
do {
- regmap_read(s->regmap,
- MAX310X_BRGDIVLSB_REG + offs, &ret);
+ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
} while (ret != 0x01);
- regmap_write(s->regmap, MAX310X_MODE1_REG + offs,
- devtype->mode1);
+ regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
@@ -1353,11 +1366,14 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
- s->p[i].port.iobase = i * 0x20;
+ s->p[i].port.iobase = i;
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
+ s->p[i].port.rs485_supported = max310x_rs485_supported;
s->p[i].port.ops = &max310x_ops;
+ s->p[i].regmap = regmaps[i];
+
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
@@ -1368,10 +1384,6 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Initialize queue for changing RS485 mode */
INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
- /* Initialize SPI-transfer buffers */
- s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) |
- MAX310X_WRITE_BIT;
- s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG);
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
@@ -1456,16 +1468,31 @@ static struct regmap_config regcfg = {
.val_bits = 8,
.write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
+ .max_register = MAX310X_REG_1F,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
};
#ifdef CONFIG_SPI_MASTER
+static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+
+ return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
+ enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL);
+}
+
+static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
+ .extended_reg_enable = max310x_spi_extended_reg_enable,
+ .rev_id_reg = MAX310X_SPI_REVID_EXTREG,
+};
+
static int max310x_spi_probe(struct spi_device *spi)
{
const struct max310x_devtype *devtype;
- struct regmap *regmap;
+ struct regmap *regmaps[4];
+ unsigned int i;
int ret;
/* Setup SPI bus */
@@ -1480,10 +1507,14 @@ static int max310x_spi_probe(struct spi_device *spi)
if (!devtype)
devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
- regcfg.max_register = devtype->nr * 0x20 - 1;
- regmap = devm_regmap_init_spi(spi, &regcfg);
+ for (i = 0; i < devtype->nr; i++) {
+ u8 port_mask = i * 0x20;
+ regcfg.read_flag_mask = port_mask;
+ regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
+ regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
+ }
- return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
+ return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq);
}
static void max310x_spi_remove(struct spi_device *spi)
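The loop above builds one regmap per port: the chip places port i's registers at i * 0x20, and rather than offsetting every register number, the port offset is folded into the SPI read/write flag masks so each regmap addresses a flat 0x00..0x1f space (which is also why the "reg & 0x1f" masking disappears from the writeable/volatile/precious callbacks). A hedged sketch of the assumed on-wire header byte, taking MAX310X_WRITE_BIT as bit 7:

/* Port i, register reg (values illustrative): */
u8 rd_hdr = (i * 0x20) | reg;				/* port 1, reg 0x05 -> 0x25 */
u8 wr_hdr = (i * 0x20) | reg | MAX310X_WRITE_BIT;	/* -> 0xa5 */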
@@ -1512,6 +1543,97 @@ static struct spi_driver max310x_spi_driver = {
};
#endif
+#ifdef CONFIG_I2C
+static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable)
+{
+ return 0;
+}
+
+static struct regmap_config regcfg_i2c = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = max310x_reg_writeable,
+ .volatile_reg = max310x_reg_volatile,
+ .precious_reg = max310x_reg_precious,
+ .max_register = MAX310X_I2C_REVID_EXTREG,
+};
+
+static const struct max310x_if_cfg max310x_i2c_if_cfg = {
+ .extended_reg_enable = max310x_i2c_extended_reg_enable,
+ .rev_id_reg = MAX310X_I2C_REVID_EXTREG,
+};
+
+static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+ unsigned int nr)
+{
+ /*
+ * For MAX14830 and MAX3109, the slave address depends on what the
+ * A0 and A1 pins are tied to.
+ * See Table I2C Address Map of the datasheet.
+ * Based on that table, the following formulas were determined.
+ * UART1 - UART0 = 0x10
+ * UART2 - UART1 = 0x20 + 0x10
+ * UART3 - UART2 = 0x10
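+ * Worked example (hypothetical wiring): with UART0 at 0x6c, this
+ * yields UART1 = 0x5c, UART2 = 0x2c and UART3 = 0x1c.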
+ */
+
+ addr -= nr * 0x10;
+
+ if (nr >= 2)
+ addr -= 0x20;
+
+ return addr;
+}
+
+static int max310x_i2c_probe(struct i2c_client *client)
+{
+ const struct max310x_devtype *devtype =
+ device_get_match_data(&client->dev);
+ struct i2c_client *port_client;
+ struct regmap *regmaps[4];
+ unsigned int i;
+ u8 port_addr;
+
+ if (client->addr < devtype->slave_addr.min ||
+ client->addr > devtype->slave_addr.max)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
+ client->addr, devtype->slave_addr.min,
+ devtype->slave_addr.max);
+
+ regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
+
+ for (i = 1; i < devtype->nr; i++) {
+ port_addr = max310x_i2c_slave_addr(client->addr, i);
+ port_client = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ port_addr);
+
+ regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
+ }
+
+ return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg,
+ regmaps, client->irq);
+}
+
+static int max310x_i2c_remove(struct i2c_client *client)
+{
+ max310x_remove(&client->dev);
+
+ return 0;
+}
+
+static struct i2c_driver max310x_i2c_driver = {
+ .driver = {
+ .name = MAX310X_NAME,
+ .of_match_table = max310x_dt_ids,
+ .pm = &max310x_pm_ops,
+ },
+ .probe_new = max310x_i2c_probe,
+ .remove = max310x_i2c_remove,
+};
+#endif
+
static int __init max310x_uart_init(void)
{
int ret;
@@ -1525,15 +1647,35 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
if (ret)
- uart_unregister_driver(&max310x_uart);
+ goto err_spi_register;
+#endif
+
+#ifdef CONFIG_I2C
+ ret = i2c_add_driver(&max310x_i2c_driver);
+ if (ret)
+ goto err_i2c_register;
+#endif
+
+ return 0;
+
+#ifdef CONFIG_I2C
+err_i2c_register:
+ spi_unregister_driver(&max310x_spi_driver);
#endif
+err_spi_register:
+ uart_unregister_driver(&max310x_uart);
+
return ret;
}
module_init(max310x_uart_init);
static void __exit max310x_uart_exit(void)
{
+#ifdef CONFIG_I2C
+ i2c_del_driver(&max310x_i2c_driver);
+#endif
+
#ifdef CONFIG_SPI_MASTER
spi_unregister_driver(&max310x_spi_driver);
#endif
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 2aec62b5d6c4..f4aaaadd0742 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -431,7 +431,8 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/****************************************************************************/
/* Enable or disable the RS485 support */
-static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+static int mcf_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
unsigned char mr1, mr2;
@@ -448,11 +449,14 @@ static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
}
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
- port->rs485 = *rs485;
return 0;
}
+static const struct serial_rs485 mcf_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+};
+
/****************************************************************************/
/*
@@ -502,6 +506,7 @@ int __init early_mcf_setup(struct mcf_platform_uart *platp)
port->uartclk = MCF_BUSCLK;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->ops = &mcf_uart_ops;
}
@@ -629,6 +634,7 @@ static int mcf_probe(struct platform_device *pdev)
port->ops = &mcf_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MCF_CONSOLE);
uart_add_one_port(&mcf_driver, port);
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 4869c0059c98..6c8db19fd572 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -162,7 +162,7 @@ static void meson_uart_start_tx(struct uart_port *port)
ch = xmit->buf[xmit->tail];
writel(ch, port->membase + AML_UART_WFIFO);
- xmit->tail = (xmit->tail+1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index e50f069b5ebb..3f1986c89694 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1630,7 +1630,7 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- uartclk = mpc5xxx_get_bus_frequency(np);
+ uartclk = mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np));
if (uartclk == 0) {
pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1747,7 +1747,7 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
/* set the uart clock to the input clock of the psc, the different
* prescalers are taken into account in the set_baudrate() methods
* of the respective chip */
- uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ uartclk = mpc5xxx_get_bus_frequency(&op->dev);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e676ec761f18..3159889ddae1 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -29,103 +29,103 @@
#include <linux/of_device.h>
#include <linux/wait.h>
-#define UART_MR1 0x0000
-
-#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
-#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
-#define UART_MR1_RX_RDY_CTL BIT(7)
-#define UART_MR1_CTS_CTL BIT(6)
-
-#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE BIT(6)
-#define UART_MR2_BITS_PER_CHAR 0x30
-#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
-#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
-#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
-#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
-#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
-#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
-#define UART_MR2_PARITY_MODE_NONE 0x0
-#define UART_MR2_PARITY_MODE_ODD 0x1
-#define UART_MR2_PARITY_MODE_EVEN 0x2
-#define UART_MR2_PARITY_MODE_SPACE 0x3
-#define UART_MR2_PARITY_MODE 0x3
-
-#define UART_CSR 0x0008
-
-#define UART_TF 0x000C
+#define MSM_UART_MR1 0x0000
+
+#define MSM_UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define MSM_UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define MSM_UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define MSM_UART_MR1_RX_RDY_CTL BIT(7)
+#define MSM_UART_MR1_CTS_CTL BIT(6)
+
+#define MSM_UART_MR2 0x0004
+#define MSM_UART_MR2_ERROR_MODE BIT(6)
+#define MSM_UART_MR2_BITS_PER_CHAR 0x30
+#define MSM_UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define MSM_UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define MSM_UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define MSM_UART_MR2_PARITY_MODE_NONE 0x0
+#define MSM_UART_MR2_PARITY_MODE_ODD 0x1
+#define MSM_UART_MR2_PARITY_MODE_EVEN 0x2
+#define MSM_UART_MR2_PARITY_MODE_SPACE 0x3
+#define MSM_UART_MR2_PARITY_MODE 0x3
+
+#define MSM_UART_CSR 0x0008
+
+#define MSM_UART_TF 0x000C
#define UARTDM_TF 0x0070
-#define UART_CR 0x0010
-#define UART_CR_CMD_NULL (0 << 4)
-#define UART_CR_CMD_RESET_RX (1 << 4)
-#define UART_CR_CMD_RESET_TX (2 << 4)
-#define UART_CR_CMD_RESET_ERR (3 << 4)
-#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
-#define UART_CR_CMD_START_BREAK (5 << 4)
-#define UART_CR_CMD_STOP_BREAK (6 << 4)
-#define UART_CR_CMD_RESET_CTS (7 << 4)
-#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
-#define UART_CR_CMD_PACKET_MODE (9 << 4)
-#define UART_CR_CMD_MODE_RESET (12 << 4)
-#define UART_CR_CMD_SET_RFR (13 << 4)
-#define UART_CR_CMD_RESET_RFR (14 << 4)
-#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
-#define UART_CR_CMD_FORCE_STALE (4 << 8)
-#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE BIT(3)
-#define UART_CR_TX_ENABLE BIT(2)
-#define UART_CR_RX_DISABLE BIT(1)
-#define UART_CR_RX_ENABLE BIT(0)
-#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
-
-#define UART_IMR 0x0014
-#define UART_IMR_TXLEV BIT(0)
-#define UART_IMR_RXSTALE BIT(3)
-#define UART_IMR_RXLEV BIT(4)
-#define UART_IMR_DELTA_CTS BIT(5)
-#define UART_IMR_CURRENT_CTS BIT(6)
-#define UART_IMR_RXBREAK_START BIT(10)
-
-#define UART_IPR_RXSTALE_LAST 0x20
-#define UART_IPR_STALE_LSB 0x1F
-#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
-#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
-
-#define UART_IPR 0x0018
-#define UART_TFWR 0x001C
-#define UART_RFWR 0x0020
-#define UART_HCR 0x0024
-
-#define UART_MREG 0x0028
-#define UART_NREG 0x002C
-#define UART_DREG 0x0030
-#define UART_MNDREG 0x0034
-#define UART_IRDA 0x0038
-#define UART_MISR_MODE 0x0040
-#define UART_MISR_RESET 0x0044
-#define UART_MISR_EXPORT 0x0048
-#define UART_MISR_VAL 0x004C
-#define UART_TEST_CTRL 0x0050
-
-#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR BIT(7)
-#define UART_SR_RX_BREAK BIT(6)
-#define UART_SR_PAR_FRAME_ERR BIT(5)
-#define UART_SR_OVERRUN BIT(4)
-#define UART_SR_TX_EMPTY BIT(3)
-#define UART_SR_TX_READY BIT(2)
-#define UART_SR_RX_FULL BIT(1)
-#define UART_SR_RX_READY BIT(0)
-
-#define UART_RF 0x000C
+#define MSM_UART_CR 0x0010
+#define MSM_UART_CR_CMD_NULL (0 << 4)
+#define MSM_UART_CR_CMD_RESET_RX (1 << 4)
+#define MSM_UART_CR_CMD_RESET_TX (2 << 4)
+#define MSM_UART_CR_CMD_RESET_ERR (3 << 4)
+#define MSM_UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define MSM_UART_CR_CMD_START_BREAK (5 << 4)
+#define MSM_UART_CR_CMD_STOP_BREAK (6 << 4)
+#define MSM_UART_CR_CMD_RESET_CTS (7 << 4)
+#define MSM_UART_CR_CMD_RESET_STALE_INT (8 << 4)
+#define MSM_UART_CR_CMD_PACKET_MODE (9 << 4)
+#define MSM_UART_CR_CMD_MODE_RESET (12 << 4)
+#define MSM_UART_CR_CMD_SET_RFR (13 << 4)
+#define MSM_UART_CR_CMD_RESET_RFR (14 << 4)
+#define MSM_UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define MSM_UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define MSM_UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define MSM_UART_CR_CMD_FORCE_STALE (4 << 8)
+#define MSM_UART_CR_CMD_RESET_TX_READY (3 << 8)
+#define MSM_UART_CR_TX_DISABLE BIT(3)
+#define MSM_UART_CR_TX_ENABLE BIT(2)
+#define MSM_UART_CR_RX_DISABLE BIT(1)
+#define MSM_UART_CR_RX_ENABLE BIT(0)
+#define MSM_UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
+
+#define MSM_UART_IMR 0x0014
+#define MSM_UART_IMR_TXLEV BIT(0)
+#define MSM_UART_IMR_RXSTALE BIT(3)
+#define MSM_UART_IMR_RXLEV BIT(4)
+#define MSM_UART_IMR_DELTA_CTS BIT(5)
+#define MSM_UART_IMR_CURRENT_CTS BIT(6)
+#define MSM_UART_IMR_RXBREAK_START BIT(10)
+
+#define MSM_UART_IPR_RXSTALE_LAST 0x20
+#define MSM_UART_IPR_STALE_LSB 0x1F
+#define MSM_UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define MSM_UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
+
+#define MSM_UART_IPR 0x0018
+#define MSM_UART_TFWR 0x001C
+#define MSM_UART_RFWR 0x0020
+#define MSM_UART_HCR 0x0024
+
+#define MSM_UART_MREG 0x0028
+#define MSM_UART_NREG 0x002C
+#define MSM_UART_DREG 0x0030
+#define MSM_UART_MNDREG 0x0034
+#define MSM_UART_IRDA 0x0038
+#define MSM_UART_MISR_MODE 0x0040
+#define MSM_UART_MISR_RESET 0x0044
+#define MSM_UART_MISR_EXPORT 0x0048
+#define MSM_UART_MISR_VAL 0x004C
+#define MSM_UART_TEST_CTRL 0x0050
+
+#define MSM_UART_SR 0x0008
+#define MSM_UART_SR_HUNT_CHAR BIT(7)
+#define MSM_UART_SR_RX_BREAK BIT(6)
+#define MSM_UART_SR_PAR_FRAME_ERR BIT(5)
+#define MSM_UART_SR_OVERRUN BIT(4)
+#define MSM_UART_SR_TX_EMPTY BIT(3)
+#define MSM_UART_SR_TX_READY BIT(2)
+#define MSM_UART_SR_RX_FULL BIT(1)
+#define MSM_UART_SR_RX_READY BIT(0)
+
+#define MSM_UART_RF 0x000C
#define UARTDM_RF 0x0070
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
-#define UART_ISR_TX_READY BIT(7)
+#define MSM_UART_MISR 0x0010
+#define MSM_UART_ISR 0x0014
+#define MSM_UART_ISR_TX_READY BIT(7)
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
@@ -181,7 +181,10 @@ struct msm_port {
struct msm_dma rx_dma;
};
-#define UART_TO_MSM(uart_port) container_of(uart_port, struct msm_port, uart)
+static inline struct msm_port *to_msm_port(struct uart_port *up)
+{
+ return container_of(up, struct msm_port, uart);
+}
static
void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
@@ -200,10 +203,10 @@ unsigned int msm_read(struct uart_port *port, unsigned int off)
*/
static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
{
- msm_write(port, 0x06, UART_MREG);
- msm_write(port, 0xF1, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x1A, UART_MNDREG);
+ msm_write(port, 0x06, MSM_UART_MREG);
+ msm_write(port, 0xF1, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x1A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
@@ -212,16 +215,16 @@ static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
*/
static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
{
- msm_write(port, 0x18, UART_MREG);
- msm_write(port, 0xF6, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x0A, UART_MNDREG);
+ msm_write(port, 0x18, MSM_UART_MREG);
+ msm_write(port, 0xF6, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x0A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
static void msm_serial_set_mnd_regs(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/*
* These registers don't exist so we change the clk input rate
@@ -392,35 +395,35 @@ static inline void msm_wait_for_xmitr(struct uart_port *port)
{
unsigned int timeout = 500000;
- while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
- if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY)) {
+ if (msm_read(port, MSM_UART_ISR) & MSM_UART_ISR_TX_READY)
break;
udelay(1);
if (!timeout--)
break;
}
- msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX_READY, MSM_UART_CR);
}
static void msm_stop_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_start_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->tx_dma;
/* Already started in DMA mode */
if (dma->count)
return;
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_reset_dm_count(struct uart_port *port, int count)
@@ -456,8 +459,8 @@ static void msm_complete_tx_dma(void *args)
msm_write(port, val, UARTDM_DMEN);
if (msm_port->is_uartdm > UARTDM_1P3) {
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
}
count = dma->count - state.residue;
@@ -468,8 +471,8 @@ static void msm_complete_tx_dma(void *args)
xmit->tail &= UART_XMIT_SIZE - 1;
/* Restore "Tx FIFO below watermark" interrupt */
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -516,8 +519,8 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
* Using DMA complete for Tx FIFO reload, no need for
* "Tx FIFO below watermark" one, disable it
*/
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
dma->count = count;
@@ -559,10 +562,10 @@ static void msm_complete_rx_dma(void *args)
val &= ~dma->enable_bit;
msm_write(port, val, UARTDM_DMEN);
- if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
+ if (msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
@@ -584,7 +587,7 @@ static void msm_complete_rx_dma(void *args)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock_irqrestore(&port->lock, flags);
@@ -638,23 +641,23 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
* Using DMA for FIFO off-load, no need for "Rx FIFO over
* watermark" or "stale" interrupts, disable them
*/
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
/*
* Well, when DMA is ADM3 engine(implied by <= UARTDM v1.3),
* we need RXSTALE to flush input DMA fifo to memory
*/
if (msm_port->is_uartdm < UARTDM_1P4)
- msm_port->imr |= UART_IMR_RXSTALE;
+ msm_port->imr |= MSM_UART_IMR_RXSTALE;
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
dma->count = UARTDM_RX_SIZE;
dma_async_issue_pending(dma->chan);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
val = msm_read(uart, UARTDM_DMEN);
val |= dma->enable_bit;
@@ -676,25 +679,25 @@ sw_mode:
* Switch from DMA to SW/FIFO mode. After clearing Rx BAM (UARTDM_DMEN),
* receiver must be reset.
*/
- msm_write(uart, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(uart, UART_CR_RX_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(uart, 0xFFFFFF, UARTDM_DMRX);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Re-enable RX interrupts */
- msm_port->imr |= (UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE;
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
}
static void msm_stop_rx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (dma->chan)
msm_stop_dma(port, dma);
@@ -702,10 +705,10 @@ static void msm_stop_rx(struct uart_port *port)
static void msm_enable_ms(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr |= UART_IMR_DELTA_CTS;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
@@ -714,20 +717,20 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
struct tty_port *tport = &port->state->port;
unsigned int sr;
int count = 0;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
- if (misr & UART_IMR_RXSTALE) {
+ if (misr & MSM_UART_IMR_RXSTALE) {
count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
msm_port->old_snap_state;
msm_port->old_snap_state = 0;
} else {
- count = 4 * (msm_read(port, UART_RFWR));
+ count = 4 * (msm_read(port, MSM_UART_RFWR));
msm_port->old_snap_state += count;
}
@@ -739,8 +742,8 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
unsigned char buf[4];
int sysrq, r_count, i;
- sr = msm_read(port, UART_SR);
- if ((sr & UART_SR_RX_READY) == 0) {
+ sr = msm_read(port, MSM_UART_SR);
+ if ((sr & MSM_UART_SR_RX_READY) == 0) {
msm_port->old_snap_state -= count;
break;
}
@@ -759,7 +762,7 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock(&port->lock);
@@ -773,10 +776,10 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
tty_flip_buffer_push(tport);
- if (misr & (UART_IMR_RXSTALE))
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ if (misr & (MSM_UART_IMR_RXSTALE))
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Try to use DMA */
msm_start_rx_dma(msm_port);
@@ -792,25 +795,25 @@ static void msm_handle_rx(struct uart_port *port)
* Handle overrun. My understanding of the hardware is that overrun
* is not tied to the RX buffer, so we handle the case out of band.
*/
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
/* and now the main RX loop */
- while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ while ((sr = msm_read(port, MSM_UART_SR)) & MSM_UART_SR_RX_READY) {
unsigned int c;
char flag = TTY_NORMAL;
int sysrq;
- c = msm_read(port, UART_RF);
+ c = msm_read(port, MSM_UART_RF);
- if (sr & UART_SR_RX_BREAK) {
+ if (sr & MSM_UART_SR_RX_BREAK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
- } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ } else if (sr & MSM_UART_SR_PAR_FRAME_ERR) {
port->icount.frame++;
} else {
port->icount.rx++;
@@ -819,9 +822,9 @@ static void msm_handle_rx(struct uart_port *port)
/* Mask conditions we're ignoring. */
sr &= port->read_status_mask;
- if (sr & UART_SR_RX_BREAK)
+ if (sr & MSM_UART_SR_RX_BREAK)
flag = TTY_BREAK;
- else if (sr & UART_SR_PAR_FRAME_ERR)
+ else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
spin_unlock(&port->lock);
@@ -837,7 +840,7 @@ static void msm_handle_rx(struct uart_port *port)
static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
struct circ_buf *xmit = &port->state->xmit;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int num_chars;
unsigned int tf_pointer = 0;
void __iomem *tf;
@@ -845,7 +848,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
if (tx_count && msm_port->is_uartdm)
msm_reset_dm_count(port, tx_count);
@@ -854,7 +857,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
int i;
char buf[4] = { 0 };
- if (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
break;
if (msm_port->is_uartdm)
@@ -883,7 +886,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
static void msm_handle_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct circ_buf *xmit = &msm_port->uart.state->xmit;
struct msm_dma *dma = &msm_port->tx_dma;
unsigned int pio_count, dma_count, dma_min;
@@ -895,7 +898,7 @@ static void msm_handle_tx(struct uart_port *port)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
buf[0] = port->x_char;
@@ -939,7 +942,7 @@ static void msm_handle_tx(struct uart_port *port)
static void msm_handle_delta_cts(struct uart_port *port)
{
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
port->icount.cts++;
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
@@ -947,27 +950,27 @@ static void msm_handle_delta_cts(struct uart_port *port)
static irqreturn_t msm_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int misr;
u32 val;
spin_lock_irqsave(&port->lock, flags);
- misr = msm_read(port, UART_MISR);
- msm_write(port, 0, UART_IMR); /* disable interrupt */
+ misr = msm_read(port, MSM_UART_MISR);
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
- if (misr & UART_IMR_RXBREAK_START) {
+ if (misr & MSM_UART_IMR_RXBREAK_START) {
msm_port->break_detected = true;
- msm_write(port, UART_CR_CMD_RESET_RXBREAK_START, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RXBREAK_START, MSM_UART_CR);
}
- if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (misr & (MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE)) {
if (dma->count) {
- val = UART_CR_CMD_STALE_EVENT_DISABLE;
- msm_write(port, val, UART_CR);
- val = UART_CR_CMD_RESET_STALE_INT;
- msm_write(port, val, UART_CR);
+ val = MSM_UART_CR_CMD_STALE_EVENT_DISABLE;
+ msm_write(port, val, MSM_UART_CR);
+ val = MSM_UART_CR_CMD_RESET_STALE_INT;
+ msm_write(port, val, MSM_UART_CR);
/*
* Flush DMA input fifo to memory; this will also
* trigger DMA RX completion
@@ -979,12 +982,12 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_handle_rx(port);
}
}
- if (misr & UART_IMR_TXLEV)
+ if (misr & MSM_UART_IMR_TXLEV)
msm_handle_tx(port);
- if (misr & UART_IMR_DELTA_CTS)
+ if (misr & MSM_UART_IMR_DELTA_CTS)
msm_handle_delta_cts(port);
- msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
@@ -992,7 +995,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
static unsigned int msm_tx_empty(struct uart_port *port)
{
- return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+ return (msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
}
static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -1002,19 +1005,19 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int mr;
/* reset everything */
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
- mr = msm_read(port, UART_MR1);
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_BREAK_INT, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
@@ -1025,24 +1028,24 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
- mr = msm_read(port, UART_MR1);
+ mr = msm_read(port, MSM_UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
} else {
- mr |= UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
}
}
static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
if (break_ctl)
- msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_START_BREAK, MSM_UART_CR);
else
- msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STOP_BREAK, MSM_UART_CR);
}
struct msm_baud_map {
@@ -1055,7 +1058,7 @@ static const struct msm_baud_map *
msm_find_best_baud(struct uart_port *port, unsigned int baud,
unsigned long *rate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int divisor, result;
unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX;
const struct msm_baud_map *entry, *end, *best;
@@ -1124,7 +1127,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
unsigned long *saved_flags)
{
unsigned int rxstale, watermark, mask;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
const struct msm_baud_map *entry;
unsigned long flags, rate;
@@ -1139,45 +1142,45 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
*saved_flags = flags;
port->uartclk = rate;
- msm_write(port, entry->code, UART_CSR);
+ msm_write(port, entry->code, MSM_UART_CSR);
/* RX stale watermark */
rxstale = entry->rxstale;
- watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark = MSM_UART_IPR_STALE_LSB & rxstale;
if (msm_port->is_uartdm) {
- mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
+ mask = MSM_UART_DM_IPR_STALE_TIMEOUT_MSB;
} else {
- watermark |= UART_IPR_RXSTALE_LAST;
- mask = UART_IPR_STALE_TIMEOUT_MSB;
+ watermark |= MSM_UART_IPR_RXSTALE_LAST;
+ mask = MSM_UART_IPR_STALE_TIMEOUT_MSB;
}
watermark |= mask & (rxstale << 2);
- msm_write(port, watermark, UART_IPR);
+ msm_write(port, watermark, MSM_UART_IPR);
/* set RX watermark */
watermark = (port->fifosize * 3) / 4;
- msm_write(port, watermark, UART_RFWR);
+ msm_write(port, watermark, MSM_UART_RFWR);
/* set TX watermark */
- msm_write(port, 10, UART_TFWR);
+ msm_write(port, 10, MSM_UART_TFWR);
- msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_PROTECTION_EN, MSM_UART_CR);
msm_reset(port);
/* Enable RX and TX */
- msm_write(port, UART_CR_TX_ENABLE | UART_CR_RX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE | MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
/* turn on RX and CTS interrupts */
- msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
- UART_IMR_CURRENT_CTS | UART_IMR_RXBREAK_START;
+ msm_port->imr = MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE |
+ MSM_UART_IMR_CURRENT_CTS | MSM_UART_IMR_RXBREAK_START;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (msm_port->is_uartdm) {
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
}
return baud;
@@ -1185,7 +1188,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
static void msm_init_clock(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
@@ -1194,7 +1197,7 @@ static void msm_init_clock(struct uart_port *port)
static int msm_startup(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int data, rfr_level, mask;
int ret;
@@ -1209,18 +1212,18 @@ static int msm_startup(struct uart_port *port)
rfr_level = port->fifosize;
/* set automatic RFR level */
- data = msm_read(port, UART_MR1);
+ data = msm_read(port, MSM_UART_MR1);
if (msm_port->is_uartdm)
- mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_DM_MR1_AUTO_RFR_LEVEL1;
else
- mask = UART_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_MR1_AUTO_RFR_LEVEL1;
data &= ~mask;
- data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data &= ~MSM_UART_MR1_AUTO_RFR_LEVEL0;
data |= mask & (rfr_level << 2);
- data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
- msm_write(port, data, UART_MR1);
+ data |= MSM_UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, MSM_UART_MR1);
if (msm_port->is_uartdm) {
msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
@@ -1246,10 +1249,10 @@ err_irq:
static void msm_shutdown(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
msm_port->imr = 0;
- msm_write(port, 0, UART_IMR); /* disable interrupts */
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupts */
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
@@ -1262,7 +1265,7 @@ static void msm_shutdown(struct uart_port *port)
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int baud, mr;
@@ -1279,60 +1282,60 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
tty_termios_encode_baud_rate(termios, baud, baud);
/* calculate parity */
- mr = msm_read(port, UART_MR2);
- mr &= ~UART_MR2_PARITY_MODE;
+ mr = msm_read(port, MSM_UART_MR2);
+ mr &= ~MSM_UART_MR2_PARITY_MODE;
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
- mr |= UART_MR2_PARITY_MODE_ODD;
+ mr |= MSM_UART_MR2_PARITY_MODE_ODD;
else if (termios->c_cflag & CMSPAR)
- mr |= UART_MR2_PARITY_MODE_SPACE;
+ mr |= MSM_UART_MR2_PARITY_MODE_SPACE;
else
- mr |= UART_MR2_PARITY_MODE_EVEN;
+ mr |= MSM_UART_MR2_PARITY_MODE_EVEN;
}
/* calculate bits per char */
- mr &= ~UART_MR2_BITS_PER_CHAR;
+ mr &= ~MSM_UART_MR2_BITS_PER_CHAR;
switch (termios->c_cflag & CSIZE) {
case CS5:
- mr |= UART_MR2_BITS_PER_CHAR_5;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_5;
break;
case CS6:
- mr |= UART_MR2_BITS_PER_CHAR_6;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_6;
break;
case CS7:
- mr |= UART_MR2_BITS_PER_CHAR_7;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_7;
break;
case CS8:
default:
- mr |= UART_MR2_BITS_PER_CHAR_8;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_8;
break;
}
/* calculate stop bits */
- mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ mr &= ~(MSM_UART_MR2_STOP_BIT_LEN_ONE | MSM_UART_MR2_STOP_BIT_LEN_TWO);
if (termios->c_cflag & CSTOPB)
- mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_TWO;
else
- mr |= UART_MR2_STOP_BIT_LEN_ONE;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_ONE;
/* set parity, bits per char, and stop bit */
- msm_write(port, mr, UART_MR2);
+ msm_write(port, mr, MSM_UART_MR2);
/* calculate and set hardware flow control */
- mr = msm_read(port, UART_MR1);
- mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~(MSM_UART_MR1_CTS_CTL | MSM_UART_MR1_RX_RDY_CTL);
if (termios->c_cflag & CRTSCTS) {
- mr |= UART_MR1_CTS_CTL;
- mr |= UART_MR1_RX_RDY_CTL;
+ mr |= MSM_UART_MR1_CTS_CTL;
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
}
- msm_write(port, mr, UART_MR1);
+ msm_write(port, mr, MSM_UART_MR1);
/* Configure status bits to ignore based on termio flags. */
port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ port->read_status_mask |= MSM_UART_SR_PAR_FRAME_ERR;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_SR_RX_BREAK;
+ port->read_status_mask |= MSM_UART_SR_RX_BREAK;
uart_update_timeout(port, termios->c_cflag, baud);
@@ -1416,7 +1419,7 @@ static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
static void msm_power(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
switch (state) {
case 0:
@@ -1435,10 +1438,10 @@ static void msm_power(struct uart_port *port, unsigned int state,
#ifdef CONFIG_CONSOLE_POLL
static int msm_poll_get_char_single(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;
+ struct msm_port *msm_port = to_msm_port(port);
+ unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : MSM_UART_RF;
- if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY))
return NO_POLL_CHAR;
return msm_read(port, rf_reg) & 0xff;
@@ -1456,7 +1459,7 @@ static int msm_poll_get_char_dm(struct uart_port *port)
c = sp[sizeof(slop) - count];
count--;
/* Or if FIFO is empty */
- } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
+ } else if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY)) {
/*
* If RX packing buffer has less than a word, force stale to
* push contents into RX FIFO
@@ -1464,14 +1467,13 @@ static int msm_poll_get_char_dm(struct uart_port *port)
count = msm_read(port, UARTDM_RXFS);
count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
if (count) {
- msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_FORCE_STALE, MSM_UART_CR);
slop = msm_read(port, UARTDM_RF);
c = sp[0];
count--;
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
- UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
} else {
c = NO_POLL_CHAR;
}
@@ -1489,11 +1491,11 @@ static int msm_poll_get_char(struct uart_port *port)
{
u32 imr;
int c;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
c = msm_poll_get_char_dm(port);
@@ -1501,7 +1503,7 @@ static int msm_poll_get_char(struct uart_port *port)
c = msm_poll_get_char_single(port);
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
return c;
}
@@ -1509,28 +1511,28 @@ static int msm_poll_get_char(struct uart_port *port)
static void msm_poll_put_char(struct uart_port *port, unsigned char c)
{
u32 imr;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
msm_reset_dm_count(port, 1);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Write a character */
- msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : MSM_UART_TF);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
}
#endif
@@ -1588,7 +1590,7 @@ static struct msm_port msm_uart_ports[] = {
},
};
-#define UART_NR ARRAY_SIZE(msm_uart_ports)
+#define MSM_UART_NR ARRAY_SIZE(msm_uart_ports)
static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
@@ -1609,7 +1611,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
/* Account for newlines that will get a carriage return added */
for (i = 0; i < count; i++)
@@ -1655,7 +1657,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
}
}
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
iowrite32_rep(tf, buf, 1);
@@ -1674,10 +1676,10 @@ static void msm_console_write(struct console *co, const char *s,
struct uart_port *port;
struct msm_port *msm_port;
- BUG_ON(co->index < 0 || co->index >= UART_NR);
+ BUG_ON(co->index < 0 || co->index >= MSM_UART_NR);
port = msm_get_port_from_line(co->index);
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
__msm_console_write(port, s, count, msm_port->is_uartdm);
}
@@ -1690,7 +1692,7 @@ static int msm_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- if (unlikely(co->index >= UART_NR || co->index < 0))
+ if (unlikely(co->index >= MSM_UART_NR || co->index < 0))
return -ENXIO;
port = msm_get_port_from_line(co->index);
@@ -1771,7 +1773,7 @@ static struct uart_driver msm_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "msm_serial",
.dev_name = "ttyMSM",
- .nr = UART_NR,
+ .nr = MSM_UART_NR,
.cons = MSM_CONSOLE,
};
@@ -1801,14 +1803,14 @@ static int msm_serial_probe(struct platform_device *pdev)
if (line < 0)
line = atomic_inc_return(&msm_uart_next_id) - 1;
- if (unlikely(line < 0 || line >= UART_NR))
+ if (unlikely(line < 0 || line >= MSM_UART_NR))
return -ENXIO;
dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
port = msm_get_port_from_line(line);
port->dev = &pdev->dev;
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
id = of_match_device(msm_uartdm_table, &pdev->dev);
if (id)
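
The msm_serial hunks above are a mechanical namespace cleanup: the driver's generic-looking UART_* register and bit names gain an MSM_ prefix so they cannot collide with the 8250-style definitions in <linux/serial_reg.h>, and the UART_TO_MSM() cast macro becomes a to_msm_port() accessor. A minimal sketch of that accessor, assuming it is the usual container_of() helper over the embedded port (the uses of msm_port->uart.state and msm_port->uart.mapbase above imply the field is named uart):

static inline struct msm_port *to_msm_port(struct uart_port *up)
{
	/* serial core hands us the embedded uart_port; recover the wrapper */
	return container_of(up, struct msm_port, uart);
}
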
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 643dfbcc43f9..0ba0f4d9459d 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -481,12 +481,6 @@ static int __init mux_probe(struct parisc_device *dev)
port->line = port_cnt;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
- /* The port->timeout needs to match what is present in
- * uart_wait_until_sent in serial_core.c. Otherwise
- * the time spent in msleep_interruptable will be very
- * long, causing the appearance of a console hang.
- */
- port->timeout = HZ / 50;
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 93489fe334d0..65eaecd10b7c 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -265,6 +265,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
+ int ret;
do {
if (status & STAT_RX_RDY(port)) {
@@ -277,6 +278,16 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
port->icount.parity++;
}
+ /*
+ * For UART2, error bits are not cleared on buffer read.
+ * This causes an interrupt loop and a system hang.
+ */
+ if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+ }
+
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 46f4d4cacb6e..0aa666e247d5 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -1102,8 +1103,6 @@ serial_omap_type(struct uart_port *port)
return up->name;
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
@@ -1118,7 +1117,7 @@ static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
@@ -1186,7 +1185,7 @@ static void omap_serial_early_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = omap_serial_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
@@ -1325,7 +1324,8 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
/* Enable or disable the rs485 support */
static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
@@ -1559,6 +1559,13 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
return 0;
}
+static const struct serial_rs485 serial_omap_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
@@ -1636,6 +1643,7 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
up->port.rs485_config = serial_omap_config_rs485;
+ up->port.rs485_supported = serial_omap_rs485_supported;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 44d20e5a7dd3..888e17e3f25f 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -201,7 +201,7 @@ static void owl_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
owl_uart_write(port, ch, OWL_UART_TXDAT);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 3b26524d48e3..8a9065e4a903 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -3,6 +3,7 @@
*Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#include <linux/kernel.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -189,8 +190,6 @@ enum {
#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP)
#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE)
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
#define DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */
#define CMITC_UARTCLK 192000000 /* 192.0000 MHz */
#define FRI2_64_UARTCLK 64000000 /* 64.0000 MHz */
@@ -1516,7 +1515,7 @@ static void pch_uart_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -1602,7 +1601,7 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
if (port_locked)
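
pch_uart, like omap and pxa elsewhere in this patch, carried a private BOTH_EMPTY macro; those copies are deleted in favour of shared helpers in <linux/serial.h>. Presumably the shared definitions look like this sketch:

#define UART_LSR_BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

static inline bool uart_lsr_tx_empty(u16 lsr)
{
	/* idle only once both the shift register and holding register drain */
	return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
}
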
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index b399aac530fe..f418f1de66b3 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -503,7 +503,7 @@ static int pic32_uart_startup(struct uart_port *port)
if (!sport->irq_fault_name) {
dev_err(port->dev, "%s: kasprintf err!", __func__);
ret = -ENOMEM;
- goto out_done;
+ goto out_disable_clk;
}
irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
@@ -579,6 +579,8 @@ out_r:
out_f:
free_irq(sport->irq_fault, port);
kfree(sport->irq_fault_name);
+out_disable_clk:
+ clk_disable_unprepare(sport->clk);
out_done:
return ret;
}
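
The pic32 change is a classic error-unwind ordering fix: the clock is already enabled when kasprintf() runs, so a kasprintf() failure must branch to a label that disables the clock again rather than straight to out_done. The general ladder, sketched with hypothetical names:

static int foo_startup(struct device *dev, struct clk *clk)
{
	char *name;
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	name = kasprintf(GFP_KERNEL, "%s:fault", dev_name(dev));
	if (!name) {
		ret = -ENOMEM;
		goto out_disable_clk;	/* unwind the clock, not just return */
	}

	/* ... request IRQs etc.; later failures unwind through here too ... */
	return 0;

out_disable_clk:
	clk_disable_unprepare(clk);
	return ret;
}
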
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 3133446e806c..f63257b8e872 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -52,7 +52,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/dbdma.h>
#include <asm/macio.h>
#else
#include <linux/platform_device.h>
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index e80ba8e10407..9309ffd87c8e 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
@@ -575,8 +576,6 @@ static struct uart_driver serial_pxa_reg;
#ifdef CONFIG_SERIAL_PXA_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -594,7 +593,7 @@ static void wait_for_xmitr(struct uart_pxa_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index f8f950641ad9..f4698a064a4d 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
@@ -940,52 +943,63 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
return 0;
}
-static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
- unsigned int sampling_rate, unsigned int *clk_div)
+static unsigned long find_clk_rate_in_tol(struct clk *clk, unsigned int desired_clk,
+ unsigned int *clk_div, unsigned int percent_tol)
{
- unsigned long ser_clk;
- unsigned long desired_clk;
- unsigned long freq, prev;
+ unsigned long freq;
unsigned long div, maxdiv;
- int64_t mult;
-
- desired_clk = baud * sampling_rate;
- if (!desired_clk) {
- pr_err("%s: Invalid frequency\n", __func__);
- return 0;
- }
+ u64 mult;
+ unsigned long offset, abs_tol, achieved;
+ abs_tol = div_u64((u64)desired_clk * percent_tol, 100);
maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
- prev = 0;
-
- for (div = 1; div <= maxdiv; div++) {
- mult = div * desired_clk;
- if (mult > ULONG_MAX)
+ div = 1;
+ while (div <= maxdiv) {
+ mult = (u64)div * desired_clk;
+ if (mult != (unsigned long)mult)
break;
- freq = clk_round_rate(clk, (unsigned long)mult);
- if (!(freq % desired_clk)) {
- ser_clk = freq;
- break;
- }
+ offset = div * abs_tol;
+ freq = clk_round_rate(clk, mult - offset);
- if (!prev)
- ser_clk = freq;
- else if (prev == freq)
+ /* Can only get lower if we're done */
+ if (freq < mult - offset)
break;
- prev = freq;
- }
+ /*
+ * Re-calculate div in case rounding skipped rates but we
+ * ended up at a good one, then check for a match.
+ */
+ div = DIV_ROUND_CLOSEST(freq, desired_clk);
+ achieved = DIV_ROUND_CLOSEST(freq, div);
+ if (achieved <= desired_clk + abs_tol &&
+ achieved >= desired_clk - abs_tol) {
+ *clk_div = div;
+ return freq;
+ }
- if (!ser_clk) {
- pr_err("%s: Can't find matching DFS entry for baud %d\n",
- __func__, baud);
- return ser_clk;
+ div = DIV_ROUND_UP(freq, desired_clk);
}
- *clk_div = ser_clk / desired_clk;
- if (!(*clk_div))
- *clk_div = 1;
+ return 0;
+}
+
+static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
+ unsigned int sampling_rate, unsigned int *clk_div)
+{
+ unsigned long ser_clk;
+ unsigned long desired_clk;
+
+ desired_clk = baud * sampling_rate;
+ if (!desired_clk)
+ return 0;
+
+ /*
+ * try to find a clock rate within 2% tolerance, then within 5%
+ */
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 2);
+ if (!ser_clk)
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 5);
return ser_clk;
}
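
The rewritten search walks divisors upward: for each candidate div it asks clk_round_rate() for div * desired minus the scaled tolerance, rounds the returned rate back to a divisor with DIV_ROUND_CLOSEST(), and accepts the first result whose per-divisor rate lands within the requested percentage of the target; get_clk_div_rate() then tries a 2% window before relaxing to 5%. A self-contained worked example of that acceptance window (the 100 MHz parent rate is an assumption for illustration, not a real DFS table entry):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t desired = 115200ull * 16;	/* baud * sampling = 1843200 Hz */
	uint64_t tol = desired * 2 / 100;	/* 2% window: +/- 36864 Hz */
	uint64_t parent = 100000000ull;		/* hypothetical parent clock */
	uint64_t div = (parent + desired / 2) / desired;	/* round-closest: 54 */
	uint64_t achieved = (parent + div / 2) / div;		/* 1851852 Hz */

	printf("div=%llu achieved=%llu within_tol=%d\n",
	       (unsigned long long)div, (unsigned long long)achieved,
	       achieved >= desired - tol && achieved <= desired + tol);
	return 0;
}

Here the achieved rate misses the ideal 1843200 Hz by about 0.47%, comfortably inside the 2% window, so the old exact-divisibility requirement (freq % desired_clk == 0) that made some platforms fail outright is no longer needed.
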
@@ -1020,8 +1034,15 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
clk_rate = get_clk_div_rate(port->se.clk, baud,
sampling_rate, &clk_div);
- if (!clk_rate)
+ if (!clk_rate) {
+ dev_err(port->se.dev,
+ "Couldn't find suitable clock rate for %u\n",
+ baud * sampling_rate);
goto out_restart_rx;
+ }
+
+ dev_dbg(port->se.dev, "desired_rate-%u, clk_rate-%lu, clk_div-%u\n",
+ baud * sampling_rate, clk_rate, clk_div);
uport->uartclk = clk_rate;
dev_pm_opp_set_rate(uport->dev, clk_rate);
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index f556b4955f59..feb2054aba37 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -353,7 +353,7 @@ static void rda_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 1afe47b62ad5..b7a4b47ce74e 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -48,6 +48,12 @@
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
+#ifdef CONFIG_ARM64
+#define UART_NR 12
+#else
+#define UART_NR CONFIG_SERIAL_SAMSUNG_UARTS
+#endif
+
#define S3C24XX_TX_PIO 1
#define S3C24XX_TX_DMA 2
#define S3C24XX_RX_PIO 1
@@ -87,7 +93,7 @@ struct s3c24xx_uart_info {
struct s3c24xx_serial_drv_data {
const struct s3c24xx_uart_info info;
const struct s3c2410_uartcfg def_cfg;
- const unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+ const unsigned int fifosize[UART_NR];
};
struct s3c24xx_uart_dma {
@@ -1011,6 +1017,7 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int umcon = rd_regl(port, S3C2410_UMCON);
+ unsigned int ucon = rd_regl(port, S3C2410_UCON);
if (mctrl & TIOCM_RTS)
umcon |= S3C2410_UMCOM_RTS_LOW;
@@ -1018,6 +1025,13 @@ static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
umcon &= ~S3C2410_UMCOM_RTS_LOW;
wr_regl(port, S3C2410_UMCON, umcon);
+
+ if (mctrl & TIOCM_LOOP)
+ ucon |= S3C2410_UCON_LOOPBACK;
+ else
+ ucon &= ~S3C2410_UCON_LOOPBACK;
+
+ wr_regl(port, S3C2410_UCON, ucon);
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
@@ -1801,67 +1815,27 @@ static const struct uart_ops apple_s5l_serial_ops = {
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
.driver_name = "s3c2410_serial",
- .nr = CONFIG_SERIAL_SAMSUNG_UARTS,
+ .nr = UART_NR,
.cons = S3C24XX_SERIAL_CONSOLE,
.dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
-#define __PORT_LOCK_UNLOCKED(i) \
- __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[i].port.lock)
-static struct s3c24xx_uart_port
-s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
- [0] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(0),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- }
- },
- [1] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(1),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
- }
- },
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 2
- [2] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(2),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 2,
- }
- },
-#endif
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
- [3] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(3),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 3,
- }
- }
-#endif
-};
-#undef __PORT_LOCK_UNLOCKED
+static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR];
+
+static void s3c24xx_serial_init_port_default(int index)
+{
+ struct uart_port *port = &s3c24xx_serial_ports[index].port;
+
+ spin_lock_init(&port->lock);
+
+ port->iotype = UPIO_MEM;
+ port->uartclk = 0;
+ port->fifosize = 16;
+ port->ops = &s3c24xx_serial_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->line = index;
+}
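
The driver previously sized its port table by CONFIG_SERIAL_SAMSUNG_UARTS and filled it with designated initializers; supporting up to 12 ports on arm64 (the Apple S5L ops above hint at why) would have meant a dozen near-identical entries behind preprocessor guards. Moving the defaults into a probe-time helper trades one initializer form for the other:

	/* compile-time form, only legal inside a designated initializer: */
	.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),

	/* runtime form, usable now that the table is a plain static array: */
	spin_lock_init(&port->lock);

Re-running the helper on a deferred re-probe is harmless, since every field it touches is a constant default.
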
/* s3c24xx_serial_resetport
*
@@ -2177,6 +2151,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
ourport = &s3c24xx_serial_ports[index];
+ s3c24xx_serial_init_port_default(index);
+
ourport->drv_data = s3c24xx_get_driver_data(pdev);
if (!ourport->drv_data) {
dev_err(&pdev->dev, "could not find driver data\n");
@@ -2575,7 +2551,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
/* is this a valid port */
- if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
+ if (co->index == -1 || co->index >= UART_NR)
co->index = 0;
port = &s3c24xx_serial_ports[co->index].port;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 8472bf70477c..259e08cc347c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1127,7 +1127,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&port->lock, flags);
}
-static int sc16is7xx_config_rs485(struct uart_port *port,
+static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
@@ -1143,7 +1143,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
return -EINVAL;
}
- port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
kthread_queue_work(&s->kworker, &one->reg_work);
@@ -1354,6 +1353,12 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
}
#endif
+static const struct serial_rs485 sc16is7xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1, /* Not supported but keep returning -EINVAL */
+};
+
static int sc16is7xx_probe(struct device *dev,
const struct sc16is7xx_devtype *devtype,
struct regmap *regmap, int irq)
@@ -1456,6 +1461,7 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
+ s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
s->p[i].port.ops = &sc16is7xx_ops;
s->p[i].old_mctrl = 0;
s->p[i].port.line = sc16is7xx_alloc_line();
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d942ab152f5a..ad4f3567ff90 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -441,7 +441,7 @@ static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
if (lsr & UART_LSR_OE) {
- /* Overrrun error */
+ /* Overrun error */
flag = TTY_OVERRUN;
tup->uport.icount.overrun++;
dev_dbg(tup->uport.dev, "Got overrun errors\n");
@@ -1080,7 +1080,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->rx_in_progress = 1;
/*
- * Enable IE_RXS for the receive status interrupts like line errros.
+ * Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
* EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
@@ -1667,6 +1667,7 @@ static int __init tegra_uart_init(void)
node = of_find_matching_node(NULL, tegra_uart_of_match);
if (node)
match = of_match_node(tegra_uart_of_match, node);
+ of_node_put(node);
if (match)
cdata = match->data;
if (cdata)
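
The serial-tegra hunk fixes a device-node leak: of_find_matching_node() returns its result with an elevated refcount that the caller owns and must drop. of_node_put() accepts NULL as a no-op, which is why the single unconditional put after the if is sufficient:

	node = of_find_matching_node(NULL, tegra_uart_of_match);
	if (node)
		match = of_match_node(tegra_uart_of_match, node);
	of_node_put(node);	/* drop the reference; safe when node is NULL */
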
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 3dc926d6c00a..12c87cd201a7 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -97,9 +97,16 @@ static inline struct uart_port *uart_port_check(struct uart_state *state)
return state->uart_port;
}
-/*
- * This routine is used by the interrupt handler to schedule processing in
- * the software interrupt portion of the driver.
+/**
+ * uart_write_wakeup - schedule write processing
+ * @port: port to be processed
+ *
+ * This routine is used by the interrupt handler to schedule processing in the
+ * software interrupt portion of the driver. A driver is expected to call this
+ * function when the number of characters in the transmit buffer have dropped
+ * below a threshold.
+ *
+ * Locking: @port->lock should be held
*/
void uart_write_wakeup(struct uart_port *port)
{
@@ -327,13 +334,16 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
}
/**
- * uart_update_timeout - update per-port FIFO timeout.
- * @port: uart_port structure describing the port
- * @cflag: termios cflag value
- * @baud: speed of the port
+ * uart_update_timeout - update per-port frame timing information
+ * @port: uart_port structure describing the port
+ * @cflag: termios cflag value
+ * @baud: speed of the port
*
- * Set the port FIFO timeout value. The @cflag value should
- * reflect the actual hardware settings.
+ * Set the @port frame timing information from which the FIFO timeout value is
+ * derived. The @cflag value should reflect the actual hardware settings, as
+ * the number of data bits, parity, stop bits and the baud rate are all taken
+ * into account here.
+ *
+ * Locking: caller is expected to take @port->lock
*/
void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
@@ -343,35 +353,30 @@ uart_update_timeout(struct uart_port *port, unsigned int cflag,
u64 frame_time;
frame_time = (u64)size * NSEC_PER_SEC;
- size *= port->fifosize;
-
- /*
- * Figure the timeout to send the above number of bits.
- * Add .02 seconds of slop
- */
- port->timeout = (HZ * size) / baud + HZ/50;
port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
/**
- * uart_get_baud_rate - return baud rate for a particular port
- * @port: uart_port structure describing the port in question.
- * @termios: desired termios settings.
- * @old: old termios (or NULL)
- * @min: minimum acceptable baud rate
- * @max: maximum acceptable baud rate
+ * uart_get_baud_rate - return baud rate for a particular port
+ * @port: uart_port structure describing the port in question.
+ * @termios: desired termios settings
+ * @old: old termios (or %NULL)
+ * @min: minimum acceptable baud rate
+ * @max: maximum acceptable baud rate
+ *
+ * Decode the termios structure into a numeric baud rate, taking account of the
+ * magic 38400 baud rate (with spd_* flags), and mapping the %B0 rate to 9600
+ * baud.
*
- * Decode the termios structure into a numeric baud rate,
- * taking account of the magic 38400 baud rate (with spd_*
- * flags), and mapping the %B0 rate to 9600 baud.
+ * If the new baud rate is invalid, try the @old termios setting. If it's still
+ * invalid, we try 9600 baud.
*
- * If the new baud rate is invalid, try the old termios setting.
- * If it's still invalid, we try 9600 baud.
+ * The @termios structure is updated to reflect the baud rate we're actually
+ * going to be using. Don't do this for the case where B0 is requested ("hang
+ * up").
*
- * Update the @termios structure to reflect the baud rate
- * we're actually going to be using. Don't do this for the case
- * where B0 is requested ("hang up").
+ * Locking: caller dependent
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
@@ -456,11 +461,17 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
EXPORT_SYMBOL(uart_get_baud_rate);
/**
- * uart_get_divisor - return uart clock divisor
- * @port: uart_port structure describing the port.
- * @baud: desired baud rate
+ * uart_get_divisor - return uart clock divisor
+ * @port: uart_port structure describing the port
+ * @baud: desired baud rate
+ *
+ * Calculate the divisor (baud_base / baud) for the specified @baud,
+ * appropriately rounded.
*
- * Calculate the uart clock divisor for the port.
+ * If 38400 baud and custom divisor is selected, return the custom divisor
+ * instead.
+ *
+ * Locking: caller dependent
*/
unsigned int
uart_get_divisor(struct uart_port *port, unsigned int baud)
@@ -1023,10 +1034,10 @@ static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
}
/**
- * uart_get_lsr_info - get line status register info
- * @tty: tty associated with the UART
- * @state: UART being queried
- * @value: returned modem value
+ * uart_get_lsr_info - get line status register info
+ * @tty: tty associated with the UART
+ * @state: UART being queried
+ * @value: returned modem value
*/
static int uart_get_lsr_info(struct tty_struct *tty,
struct uart_state *state, unsigned int __user *value)
@@ -1276,6 +1287,126 @@ static int uart_get_icount(struct tty_struct *tty,
return 0;
}
+#define SER_RS485_LEGACY_FLAGS (SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | \
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX | \
+ SER_RS485_TERMINATE_BUS)
+
+static int uart_check_rs485_flags(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 flags = rs485->flags;
+
+ /* Don't return -EINVAL for unsupported legacy flags */
+ flags &= ~SER_RS485_LEGACY_FLAGS;
+
+ /*
+ * For any bit outside of the legacy ones that is not supported by
+ * the driver, return -EINVAL.
+ */
+ if (flags & ~port->rs485_supported.flags)
+ return -EINVAL;
+
+ /* Asking for address w/o addressing mode? */
+ if (!(rs485->flags & SER_RS485_ADDRB) &&
+ (rs485->flags & (SER_RS485_ADDR_RECV|SER_RS485_ADDR_DEST)))
+ return -EINVAL;
+
+ /* Address given but not enabled? */
+ if (!(rs485->flags & SER_RS485_ADDR_RECV) && rs485->addr_recv)
+ return -EINVAL;
+ if (!(rs485->flags & SER_RS485_ADDR_DEST) && rs485->addr_dest)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void uart_sanitize_serial_rs485_delays(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ if (!port->rs485_supported.delay_rts_before_send) {
+ if (rs485->delay_rts_before_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_before_send = 0;
+ } else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_before_send);
+ }
+
+ if (!port->rs485_supported.delay_rts_after_send) {
+ if (rs485->delay_rts_after_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_after_send = 0;
+ } else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_after_send);
+ }
+}
+
+static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 supported_flags = port->rs485_supported.flags;
+
+ if (!(rs485->flags & SER_RS485_ENABLED)) {
+ memset(rs485, 0, sizeof(*rs485));
+ return;
+ }
+
+ /* Pick sane settings if the user hasn't */
+ if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
+ !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
+ }
+
+ rs485->flags &= supported_flags;
+
+ uart_sanitize_serial_rs485_delays(port, rs485);
+
+ /* Return clean padding area to userspace */
+ memset(rs485->padding0, 0, sizeof(rs485->padding0));
+ memset(rs485->padding1, 0, sizeof(rs485->padding1));
+}
+
+static void uart_set_rs485_termination(struct uart_port *port,
+ const struct serial_rs485 *rs485)
+{
+ if (!(rs485->flags & SER_RS485_ENABLED))
+ return;
+
+ gpiod_set_value_cansleep(port->rs485_term_gpio,
+ !!(rs485->flags & SER_RS485_TERMINATE_BUS));
+}
+
+int uart_rs485_config(struct uart_port *port)
+{
+ struct serial_rs485 *rs485 = &port->rs485;
+ int ret;
+
+ uart_sanitize_serial_rs485(port, rs485);
+ uart_set_rs485_termination(port, rs485);
+
+ ret = port->rs485_config(port, NULL, rs485);
+ if (ret)
+ memset(rs485, 0, sizeof(*rs485));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uart_rs485_config);
+
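
Taken together, the new core path for TIOCSRS485 is: uart_check_rs485_flags() hard-fails with -EINVAL only for non-legacy flags the driver did not declare, uart_sanitize_serial_rs485() masks or clamps the rest with ratelimited warnings, the termination GPIO is driven, and only then is the driver hook called under the port lock. uart_rs485_config() exposes the same sanitize-then-apply sequence, presumably so a driver can run firmware-provided rs485 defaults through it at probe; a hypothetical probe fragment (names invented):

	port->rs485_config = foo_config_rs485;
	port->rs485_supported = foo_rs485_supported;

	/* feed DT/ACPI rs485 defaults through the common sanitizer */
	ret = uart_rs485_config(port);
	if (ret)
		dev_warn(port->dev, "rs485 defaults rejected: %d\n", ret);
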
static int uart_get_rs485_config(struct uart_port *port,
struct serial_rs485 __user *rs485)
{
@@ -1292,7 +1423,7 @@ static int uart_get_rs485_config(struct uart_port *port,
return 0;
}
-static int uart_set_rs485_config(struct uart_port *port,
+static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
struct serial_rs485 __user *rs485_user)
{
struct serial_rs485 rs485;
@@ -1305,34 +1436,14 @@ static int uart_set_rs485_config(struct uart_port *port,
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
- /* pick sane settings if the user hasn't */
- if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
- dev_warn_ratelimited(port->dev,
- "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
- port->name, port->line);
- rs485.flags |= SER_RS485_RTS_ON_SEND;
- rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
- }
-
- if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay before sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_before_send);
- }
-
- if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay after sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_after_send);
- }
- /* Return clean padding area to userspace */
- memset(rs485.padding, 0, sizeof(rs485.padding));
+ ret = uart_check_rs485_flags(port, &rs485);
+ if (ret)
+ return ret;
+ uart_sanitize_serial_rs485(port, &rs485);
+ uart_set_rs485_termination(port, &rs485);
spin_lock_irqsave(&port->lock, flags);
- ret = port->rs485_config(port, &rs485);
+ ret = port->rs485_config(port, &tty->termios, &rs485);
if (!ret)
port->rs485 = rs485;
spin_unlock_irqrestore(&port->lock, flags);
@@ -1441,6 +1552,10 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
if (ret != -ENOIOCTLCMD)
goto out;
+ /* rs485_config requires more locking than others */
+ if (cmd == TIOCGRS485)
+ down_write(&tty->termios_rwsem);
+
mutex_lock(&port->mutex);
uport = uart_port_check(state);
@@ -1464,7 +1579,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
break;
case TIOCSRS485:
- ret = uart_set_rs485_config(uport, uarg);
+ ret = uart_set_rs485_config(tty, uport, uarg);
break;
case TIOCSISO7816:
@@ -1481,6 +1596,8 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
}
out_up:
mutex_unlock(&port->mutex);
+ if (cmd == TIOCGRS485)
+ up_write(&tty->termios_rwsem);
out:
return ret;
}
@@ -1628,7 +1745,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
- unsigned long char_time, expire;
+ unsigned long char_time, expire, fifo_timeout;
port = uart_port_ref(state);
if (!port)
@@ -1658,12 +1775,13 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
- * takes longer than port->timeout, this is probably due to a
+ * takes longer than the FIFO timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
- * 2*port->timeout.
+ * 2 * FIFO timeout.
*/
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
+ fifo_timeout = uart_fifo_timeout(port);
+ if (timeout == 0 || timeout > 2 * fifo_timeout)
+ timeout = 2 * fifo_timeout;
}
expire = jiffies + timeout;
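
port->timeout is being retired in favour of computing the FIFO drain time on demand from port->frame_time, which uart_update_timeout() above now maintains as the only timing state; this is also why the mux.c workaround near the top of this patch could be deleted outright. The replacement helper presumably follows the old math, trading the cached jiffies value, which went stale whenever the baud rate changed, for a fresh derivation:

static inline unsigned long uart_fifo_timeout(struct uart_port *port)
{
	u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize;

	/* keep the traditional 0.02 s (HZ/50) of slop */
	fifo_timeout += 20 * NSEC_PER_MSEC;

	return max(nsecs_to_jiffies(fifo_timeout), 1UL);
}
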
@@ -1949,11 +2067,11 @@ static void uart_port_spin_lock_init(struct uart_port *port)
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
- * uart_console_write - write a console message to a serial port
- * @port: the port to write the message
- * @s: array of characters
- * @count: number of characters in string to write
- * @putchar: function to write character to port
+ * uart_console_write - write a console message to a serial port
+ * @port: the port to write the message
+ * @s: array of characters
+ * @count: number of characters in string to write
+ * @putchar: function to write character to port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
@@ -1969,10 +2087,15 @@ void uart_console_write(struct uart_port *port, const char *s,
}
EXPORT_SYMBOL_GPL(uart_console_write);
-/*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
+/**
+ * uart_get_console - get uart port for console
+ * @ports: ports to search in
+ * @nr: number of @ports
+ * @co: console to search for
+ * Returns: uart_port for the console @co
+ *
+ * Check whether an invalid uart number has been specified (as @co->index), and
+ * if so, search for the first available port that does have console support.
*/
struct uart_port * __init
uart_get_console(struct uart_port *ports, int nr, struct console *co)
@@ -1992,24 +2115,23 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
}
/**
- * uart_parse_earlycon - Parse earlycon options
- * @p: ptr to 2nd field (ie., just beyond '<name>,')
- * @iotype: ptr for decoded iotype (out)
- * @addr: ptr for decoded mapbase/iobase (out)
- * @options: ptr for <options> field; NULL if not present (out)
- *
- * Decodes earlycon kernel command line parameters of the form
- * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
- * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * uart_parse_earlycon - Parse earlycon options
+ * @p: ptr to 2nd field (ie., just beyond '<name>,')
+ * @iotype: ptr for decoded iotype (out)
+ * @addr: ptr for decoded mapbase/iobase (out)
+ * @options: ptr for <options> field; %NULL if not present (out)
*
- * The optional form
+ * Decodes earlycon kernel command line parameters of the form:
+ * * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
*
- * earlycon=<name>,0x<addr>,<options>
- * console=<name>,0x<addr>,<options>
+ * The optional form:
+ * * earlycon=<name>,0x<addr>,<options>
+ * * console=<name>,0x<addr>,<options>
*
- * is also accepted; the returned @iotype will be UPIO_MEM.
+ * is also accepted; the returned @iotype will be %UPIO_MEM.
*
- * Returns 0 on success or -EINVAL on failure
+ * Returns: 0 on success or -%EINVAL on failure
*/
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options)
@@ -2054,16 +2176,16 @@ int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
EXPORT_SYMBOL_GPL(uart_parse_earlycon);
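
For a concrete reading of the grammar documented above: a command line such as earlycon=uart8250,mmio32,0x44e09000,115200n8 names the driver, the io type, the mapped register base, and an options string in the uart_parse_options() format described next; the shorter earlycon=uart8250,0x44e09000,115200n8 form is equivalent, with @iotype defaulting to %UPIO_MEM. (The 0x44e09000 base is illustrative only, not tied to any board in this patch.)
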
/**
- * uart_parse_options - Parse serial port baud/parity/bits/flow control.
- * @options: pointer to option string
- * @baud: pointer to an 'int' variable for the baud rate.
- * @parity: pointer to an 'int' variable for the parity.
- * @bits: pointer to an 'int' variable for the number of data bits.
- * @flow: pointer to an 'int' variable for the flow control character.
+ * uart_parse_options - Parse serial port baud/parity/bits/flow control.
+ * @options: pointer to option string
+ * @baud: pointer to an 'int' variable for the baud rate.
+ * @parity: pointer to an 'int' variable for the parity.
+ * @bits: pointer to an 'int' variable for the number of data bits.
+ * @flow: pointer to an 'int' variable for the flow control character.
*
- * uart_parse_options decodes a string containing the serial console
- * options. The format of the string is <baud><parity><bits><flow>,
- * eg: 115200n8r
+ * uart_parse_options() decodes a string containing the serial console
+ * options. The format of the string is <baud><parity><bits><flow>,
+ * e.g.: 115200n8r
*/
void
uart_parse_options(const char *options, int *baud, int *parity,
@@ -2084,13 +2206,13 @@ uart_parse_options(const char *options, int *baud, int *parity,
EXPORT_SYMBOL_GPL(uart_parse_options);
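For illustration, a hypothetical console-setup snippet chaining this helper with uart_set_options() (documented next); the same pairing appears verbatim in console setup callbacks elsewhere in this patch:

    int baud = 9600, parity = 'n', bits = 8, flow = 'n';

    uart_parse_options("115200n8r", &baud, &parity, &bits, &flow);
    /* now baud == 115200, parity == 'n', bits == 8, flow == 'r' */
    return uart_set_options(port, co, baud, parity, bits, flow);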
/**
- * uart_set_options - setup the serial console parameters
- * @port: pointer to the serial ports uart_port structure
- * @co: console pointer
- * @baud: baud rate
- * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
- * @bits: number of data bits
- * @flow: flow control character - 'r' (rts)
+ * uart_set_options - setup the serial console parameters
+ * @port: pointer to the serial port's uart_port structure
+ * @co: console pointer
+ * @baud: baud rate
+ * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
+ * @bits: number of data bits
+ * @flow: flow control character - 'r' (rts)
*/
int
uart_set_options(struct uart_port *port, struct console *co,
@@ -2577,17 +2699,19 @@ static const struct tty_port_operations uart_port_ops = {
};
/**
- * uart_register_driver - register a driver with the uart core layer
- * @drv: low level driver structure
+ * uart_register_driver - register a driver with the uart core layer
+ * @drv: low level driver structure
+ *
+ * Register a uart driver with the core driver. We in turn register with the
+ * tty layer, and initialise the core driver per-port state.
*
- * Register a uart driver with the core driver. We in turn register
- * with the tty layer, and initialise the core driver per-port state.
+ * We have a proc file in /proc/tty/driver which is named after the normal
+ * driver.
*
- * We have a proc file in /proc/tty/driver which is named after the
- * normal driver.
+ * @drv->port should be %NULL, and the per-port structures should be registered
+ * using uart_add_one_port() after this call has succeeded.
*
- * drv->port should be NULL, and the per-port structures should be
- * registered using uart_add_one_port after this call has succeeded.
+ * Locking: none, Interrupts: enabled
*/
int uart_register_driver(struct uart_driver *drv)
{
@@ -2651,13 +2775,14 @@ out:
EXPORT_SYMBOL(uart_register_driver);
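A registration sketch following the rules above (all names hypothetical; major/minor are omitted, in which case the tty layer would allocate a device number dynamically):

    static struct uart_driver my_uart_driver = {
            .owner          = THIS_MODULE,
            .driver_name    = "myserial",
            .dev_name       = "ttyMY",
            .nr             = 4,
            /* .port stays NULL; ports come later via uart_add_one_port() */
    };

    static int __init my_uart_init(void)
    {
            return uart_register_driver(&my_uart_driver);
    }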
/**
- * uart_unregister_driver - remove a driver from the uart core layer
- * @drv: low level driver structure
+ * uart_unregister_driver - remove a driver from the uart core layer
+ * @drv: low level driver structure
*
- * Remove all references to a driver from the core driver. The low
- * level driver must have removed all its ports via the
- * uart_remove_one_port() if it registered them with uart_add_one_port().
- * (ie, drv->port == NULL)
+ * Remove all references to a driver from the core driver. The low level
+ * driver must have removed all its ports via uart_remove_one_port() if it
+ * registered them with uart_add_one_port(). (I.e. @drv->port is %NULL.)
+ *
+ * Locking: none, Interrupts: enabled
*/
void uart_unregister_driver(struct uart_driver *drv)
{
@@ -2906,16 +3031,15 @@ static const struct attribute_group tty_dev_attr_group = {
};
/**
- * uart_add_one_port - attach a driver-defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure to use for this port.
+ * uart_add_one_port - attach a driver-defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure to use for this port.
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This allows the driver to register its own uart_port structure
- * with the core driver. The main purpose is to allow the low
- * level uart drivers to expand uart_port, rather than having yet
- * more levels of structures.
+ * This allows the driver @drv to register its own uart_port structure with the
+ * core driver. The main purpose is to allow the low level uart drivers to
+ * expand uart_port, rather than having yet more levels of structures.
*/
int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3010,15 +3134,14 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
EXPORT_SYMBOL(uart_add_one_port);
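A sketch of the "expand uart_port" pattern the comment describes (hypothetical driver structure and probe fragment):

    struct my_uart_port {
            struct uart_port port;          /* embedded; recovered via container_of() */
            void __iomem *extra_regs;       /* driver-private state lives alongside */
    };

    /* in probe, after filling in mp->port.{dev,ops,membase,...}: */
    ret = uart_add_one_port(&my_uart_driver, &mp->port);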
/**
- * uart_remove_one_port - detach a driver defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure for this port
+ * uart_remove_one_port - detach a driver defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure for this port
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This unhooks (and hangs up) the specified port structure from the
- * core driver. No further calls will be made to the low-level code
- * for this port.
+ * This unhooks (and hangs up) the specified port structure from the core
+ * driver. No further calls will be made to the low-level code for this port.
*/
int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3090,8 +3213,13 @@ out:
}
EXPORT_SYMBOL(uart_remove_one_port);
-/*
- * Are the two ports equivalent?
+/**
+ * uart_match_port - are the two ports equivalent?
+ * @port1: first port
+ * @port2: second port
+ *
+ * This utility function can be used to determine whether two uart_port
+ * structures describe the same port.
*/
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2)
@@ -3119,11 +3247,11 @@ bool uart_match_port(const struct uart_port *port1,
EXPORT_SYMBOL(uart_match_port);
/**
- * uart_handle_dcd_change - handle a change of carrier detect state
- * @uport: uart_port structure for the open port
- * @status: new carrier detect status, nonzero if active
+ * uart_handle_dcd_change - handle a change of carrier detect state
+ * @uport: uart_port structure for the open port
+ * @status: new carrier detect status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
@@ -3154,11 +3282,11 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
/**
- * uart_handle_cts_change - handle a change of clear-to-send state
- * @uport: uart_port structure for the open port
- * @status: new clear to send status, nonzero if active
+ * uart_handle_cts_change - handle a change of clear-to-send state
+ * @uport: uart_port structure for the open port
+ * @status: new clear to send status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
{
@@ -3229,15 +3357,15 @@ static void uart_sysrq_on(struct work_struct *w)
static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
/**
- * uart_try_toggle_sysrq - Enables SysRq from serial line
- * @port: uart_port structure where char(s) after BREAK met
- * @ch: new character in the sequence after received BREAK
+ * uart_try_toggle_sysrq - Enables SysRq from serial line
+ * @port: uart_port structure where the character(s) after BREAK arrived
+ * @ch: new character in the sequence after a received BREAK
*
- * Enables magic SysRq when the required sequence is met on port
- * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
+ * Enables magic SysRq when the required sequence is met on port
+ * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
*
- * Returns false if @ch is out of enabling sequence and should be
- * handled some other way, true if @ch was consumed.
+ * Returns: %false if @ch is out of enabling sequence and should be
+ * handled some other way, %true if @ch was consumed.
*/
bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
{
@@ -3289,6 +3417,8 @@ int uart_get_rs485_mode(struct uart_port *port)
rs485conf->delay_rts_after_send = 0;
}
+ uart_sanitize_serial_rs485_delays(port, rs485conf);
+
/*
* Clear full-duplex and enabled flags, set RTS polarity to active high
* to get to a defined state with the following properties:
@@ -3321,10 +3451,20 @@ int uart_get_rs485_mode(struct uart_port *port)
port->rs485_term_gpio = NULL;
return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
}
+ if (port->rs485_term_gpio)
+ port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
+/* Compile-time assertions for serial_rs485 layout */
+static_assert(offsetof(struct serial_rs485, padding) ==
+ (offsetof(struct serial_rs485, delay_rts_after_send) + sizeof(__u32)));
+static_assert(offsetof(struct serial_rs485, padding1) ==
+ offsetof(struct serial_rs485, padding[1]));
+static_assert((offsetof(struct serial_rs485, padding[4]) + sizeof(__u32)) ==
+ sizeof(struct serial_rs485));
+
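Taken together, the three assertions pin the user-visible layout: padding[] starts immediately after delay_rts_after_send, padding1 overlays padding[1], and the struct ends right after padding[4]. A sketch of the shape they guarantee (derived from the asserts themselves, not copied from the uapi header):

    /*
     *  +0   __u32 delay_rts_after_send;
     *  +4   __u32 padding[0]
     *  +8   __u32 padding[1]   (also reachable as padding1)
     *  ...
     *  +24  end of struct (right after padding[4])
     */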
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 1663b3afc3a0..7d5aaa8d422b 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -42,6 +42,13 @@ static bool mctrl_gpio_flags_is_dir_out(unsigned int idx)
return mctrl_gpios_desc[idx].flags & GPIOD_FLAGS_BIT_DIR_OUT;
}
+/**
+ * mctrl_gpio_set - set gpios according to mctrl state
+ * @gpios: gpios to set
+ * @mctrl: state to set
+ *
+ * Set the gpios according to the mctrl state.
+ */
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
{
enum mctrl_gpio_idx i;
@@ -63,6 +70,12 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_set);
+/**
+ * mctrl_gpio_to_gpiod - obtain gpio_desc of modem line index
+ * @gpios: gpios to look into
+ * @gidx: index of the modem line
+ * Returns: the gpio_desc structure associated with the modem line index
+ */
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
@@ -73,6 +86,14 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
+/**
+ * mctrl_gpio_get - update mctrl with the gpio values.
+ * @gpios: gpios to get the info from
+ * @mctrl: mctrl to set
+ * Returns: modified mctrl (the same value as in @mctrl)
+ *
+ * Update @mctrl with the values read from the gpios.
+ */
unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
{
enum mctrl_gpio_idx i;
@@ -189,6 +210,17 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
return IRQ_HANDLED;
}
+/**
+ * mctrl_gpio_init - initialize uart gpios
+ * @port: port to initialize gpios for
+ * @idx: index of the gpio in the @port's device
+ *
+ * This will get the {cts,rts,...}-gpios from device tree if they are present
+ * and request them, set direction etc, and return an allocated structure.
+ * `devm_*` functions are used, so there's no need to call mctrl_gpio_free().
+ * Since this sets up the irq handling for the input lines, make sure your
+ * driver does not also handle changes to those gpio lines itself.
+ */
struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
{
struct mctrl_gpios *gpios;
@@ -235,6 +267,14 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_init);
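A hypothetical probe-time call showing the behaviour noted above:

    struct mctrl_gpios *gpios = mctrl_gpio_init(port, 0);
    if (IS_ERR(gpios))
            return PTR_ERR(gpios);
    /* devm-managed: nothing to free on remove, and the irq handlers for
     * the input lines are installed here, so the driver adds none of its own */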
+/**
+ * mctrl_gpio_free - explicitly free uart gpios
+ * @dev: uart port's device
+ * @gpios: gpios structure to be freed
+ *
+ * This will free the gpios requested in mctrl_gpio_init(). As `devm_*`
+ * functions are used, there's generally no need to call this function.
+ */
void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -253,6 +293,10 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_free);
+/**
+ * mctrl_gpio_enable_ms - enable irqs and handling of changes to the ms lines
+ * @gpios: gpios to enable
+ */
void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -278,6 +322,10 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
+/**
+ * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
+ * @gpios: gpios to disable
+ */
void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index c0869b080cc3..5c3a07546a58 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -4,16 +4,6 @@
* Copyright (C) 2018 Paul Walmsley <paul@pwsan.com>
* Copyright (C) 2018-2019 SiFive
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Based partially on:
* - drivers/tty/serial/pxa.c
* - drivers/tty/serial/amba-pl011.c
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 1b0da603ab54..cce42f4c9bc2 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -17,7 +17,6 @@
#include <linux/tty_flip.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 0973b03eeeaa..2c85dbf165c4 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -35,6 +35,75 @@
#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
+
+/* Register offsets */
+static struct stm32_usart_info stm32f4_info = {
+ .ofs = {
+ .isr = 0x00,
+ .rdr = 0x04,
+ .tdr = 0x04,
+ .brr = 0x08,
+ .cr1 = 0x0c,
+ .cr2 = 0x10,
+ .cr3 = 0x14,
+ .gtpr = 0x18,
+ .rtor = UNDEF_REG,
+ .rqr = UNDEF_REG,
+ .icr = UNDEF_REG,
+ },
+ .cfg = {
+ .uart_enable_bit = 13,
+ .has_7bits_data = false,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32f7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32h7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .has_wakeup = true,
+ .has_fifo = true,
+ .fifosize = 16,
+ }
+};
+
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
@@ -99,7 +168,7 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr1 |= rs485_deat_dedt;
}
-static int stm32_usart_config_rs485(struct uart_port *port,
+static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1377,6 +1446,13 @@ static void stm32_usart_deinit_port(struct stm32_port *stm32port)
clk_disable_unprepare(stm32port->clk);
}
+static const struct serial_rs485 stm32_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int stm32_usart_init_port(struct stm32_port *stm32port,
struct platform_device *pdev)
{
@@ -1396,6 +1472,7 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
port->irq = irq;
port->rs485_config = stm32_usart_config_rs485;
+ port->rs485_supported = stm32_rs485_supported;
ret = stm32_usart_init_rs485(port, pdev);
if (ret)
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index ee69c203b926..0ec41a732c88 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -38,74 +38,6 @@ struct stm32_usart_info {
#define UNDEF_REG 0xff
-/* Register offsets */
-struct stm32_usart_info stm32f4_info = {
- .ofs = {
- .isr = 0x00,
- .rdr = 0x04,
- .tdr = 0x04,
- .brr = 0x08,
- .cr1 = 0x0c,
- .cr2 = 0x10,
- .cr3 = 0x14,
- .gtpr = 0x18,
- .rtor = UNDEF_REG,
- .rqr = UNDEF_REG,
- .icr = UNDEF_REG,
- },
- .cfg = {
- .uart_enable_bit = 13,
- .has_7bits_data = false,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32f7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32h7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .has_wakeup = true,
- .has_fifo = true,
- .fifosize = 16,
- }
-};
-
/* USART_SR (F4) / USART_ISR (F7) */
#define USART_SR_PE BIT(0)
#define USART_SR_FE BIT(1)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index fff50b5b82eb..84d545e5a8c7 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1249,8 +1249,6 @@ static int sunsu_kbd_ms_init(struct uart_sunsu_port *up)
#ifdef CONFIG_SERIAL_SUNSU_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -1268,7 +1266,7 @@ static void wait_for_xmitr(struct uart_sunsu_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 6000853973c1..3cc9ef08455c 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1137,6 +1137,8 @@ static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
/* No compatible property, so try the name. */
soc_string = np->name;
+ of_node_put(np);
+
/* Extract the SOC number from the "PowerPC," string */
if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
return 0;
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
deleted file mode 100644
index e0bf003ca3a1..000000000000
--- a/drivers/tty/serial/vr41xx_siu.c
+++ /dev/null
@@ -1,934 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for NEC VR4100 series Serial Interface Unit.
- *
- * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * Based on drivers/serial/8250.c, by Russell King.
- */
-
-#include <linux/console.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/serial_reg.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-
-#include <linux/io.h>
-#include <asm/vr41xx/siu.h>
-#include <asm/vr41xx/vr41xx.h>
-
-#define SIU_BAUD_BASE 1152000
-#define SIU_MAJOR 204
-#define SIU_MINOR_BASE 82
-
-#define RX_MAX_COUNT 256
-#define TX_MAX_COUNT 15
-
-#define SIUIRSEL 0x08
- #define TMICMODE 0x20
- #define TMICTX 0x10
- #define IRMSEL 0x0c
- #define IRMSEL_HP 0x08
- #define IRMSEL_TEMIC 0x04
- #define IRMSEL_SHARP 0x00
- #define IRUSESEL 0x02
- #define SIRSEL 0x01
-
-static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = {
- [0 ... SIU_PORTS_MAX-1] = {
- .lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock),
- .irq = 0,
- },
-};
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-static uint8_t lsr_break_flag[SIU_PORTS_MAX];
-#endif
-
-#define siu_read(port, offset) readb((port)->membase + (offset))
-#define siu_write(port, offset, value) writeb((value), (port)->membase + (offset))
-
-void vr41xx_select_siu_interface(siu_interface_t interface)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (interface == SIU_INTERFACE_IRDA)
- irsel |= SIRSEL;
- else
- irsel &= ~SIRSEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_siu_interface);
-
-void vr41xx_use_irda(irda_use_t use)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (use == FIR_USE_IRDA)
- irsel |= IRUSESEL;
- else
- irsel &= ~IRUSESEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_use_irda);
-
-void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- irsel &= ~(IRMSEL | TMICTX | TMICMODE);
- switch (module) {
- case SHARP_IRDA:
- irsel |= IRMSEL_SHARP;
- break;
- case TEMIC_IRDA:
- irsel |= IRMSEL_TEMIC | TMICMODE;
- if (speed == IRDA_TX_4MBPS)
- irsel |= TMICTX;
- break;
- case HP_IRDA:
- irsel |= IRMSEL_HP;
- break;
- default:
- break;
- }
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_irda_module);
-
-static inline void siu_clear_fifo(struct uart_port *port)
-{
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO);
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- siu_write(port, UART_FCR, 0);
-}
-
-static inline unsigned long siu_port_size(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return 11UL;
- case PORT_VR41XX_DSIU:
- return 8UL;
- }
-
- return 0;
-}
-
-static inline unsigned int siu_check_type(struct uart_port *port)
-{
- if (port->line == 0)
- return PORT_VR41XX_SIU;
- if (port->line == 1 && port->irq)
- return PORT_VR41XX_DSIU;
-
- return PORT_UNKNOWN;
-}
-
-static inline const char *siu_type_name(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return "SIU";
- case PORT_VR41XX_DSIU:
- return "DSIU";
- }
-
- return NULL;
-}
-
-static unsigned int siu_tx_empty(struct uart_port *port)
-{
- uint8_t lsr;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_TEMT)
- return TIOCSER_TEMT;
-
- return 0;
-}
-
-static void siu_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- uint8_t mcr = 0;
-
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- siu_write(port, UART_MCR, mcr);
-}
-
-static unsigned int siu_get_mctrl(struct uart_port *port)
-{
- uint8_t msr;
- unsigned int mctrl = 0;
-
- msr = siu_read(port, UART_MSR);
- if (msr & UART_MSR_DCD)
- mctrl |= TIOCM_CAR;
- if (msr & UART_MSR_RI)
- mctrl |= TIOCM_RNG;
- if (msr & UART_MSR_DSR)
- mctrl |= TIOCM_DSR;
- if (msr & UART_MSR_CTS)
- mctrl |= TIOCM_CTS;
-
- return mctrl;
-}
-
-static void siu_stop_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_start_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_stop_rx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_RLSI;
- siu_write(port, UART_IER, ier);
-
- port->read_status_mask &= ~UART_LSR_DR;
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_enable_ms(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_break_ctl(struct uart_port *port, int ctl)
-{
- unsigned long flags;
- uint8_t lcr;
-
- spin_lock_irqsave(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- if (ctl == -1)
- lcr |= UART_LCR_SBC;
- else
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static inline void receive_chars(struct uart_port *port, uint8_t *status)
-{
- uint8_t lsr, ch;
- char flag;
- int max_count = RX_MAX_COUNT;
-
- lsr = *status;
-
- do {
- ch = siu_read(port, UART_RX);
- port->icount.rx++;
- flag = TTY_NORMAL;
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
- lsr |= lsr_break_flag[port->line];
- lsr_break_flag[port->line] = 0;
-#endif
- if (unlikely(lsr & (UART_LSR_BI | UART_LSR_FE |
- UART_LSR_PE | UART_LSR_OE))) {
- if (lsr & UART_LSR_BI) {
- lsr &= ~(UART_LSR_FE | UART_LSR_PE);
- port->icount.brk++;
-
- if (uart_handle_break(port))
- goto ignore_char;
- }
-
- if (lsr & UART_LSR_FE)
- port->icount.frame++;
- if (lsr & UART_LSR_PE)
- port->icount.parity++;
- if (lsr & UART_LSR_OE)
- port->icount.overrun++;
-
- lsr &= port->read_status_mask;
- if (lsr & UART_LSR_BI)
- flag = TTY_BREAK;
- if (lsr & UART_LSR_FE)
- flag = TTY_FRAME;
- if (lsr & UART_LSR_PE)
- flag = TTY_PARITY;
- }
-
- if (uart_handle_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
- ignore_char:
- lsr = siu_read(port, UART_LSR);
- } while ((lsr & UART_LSR_DR) && (max_count-- > 0));
-
- tty_flip_buffer_push(&port->state->port);
-
- *status = lsr;
-}
-
-static inline void check_modem_status(struct uart_port *port)
-{
- uint8_t msr;
-
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_ANY_DELTA) == 0)
- return;
- if (msr & UART_MSR_DDCD)
- uart_handle_dcd_change(port, msr & UART_MSR_DCD);
- if (msr & UART_MSR_TERI)
- port->icount.rng++;
- if (msr & UART_MSR_DDSR)
- port->icount.dsr++;
- if (msr & UART_MSR_DCTS)
- uart_handle_cts_change(port, msr & UART_MSR_CTS);
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
-}
-
-static inline void transmit_chars(struct uart_port *port)
-{
- struct circ_buf *xmit;
- int max_count = TX_MAX_COUNT;
-
- xmit = &port->state->xmit;
-
- if (port->x_char) {
- siu_write(port, UART_TX, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- siu_stop_tx(port);
- return;
- }
-
- do {
- siu_write(port, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (max_count-- > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (uart_circ_empty(xmit))
- siu_stop_tx(port);
-}
-
-static irqreturn_t siu_interrupt(int irq, void *dev_id)
-{
- struct uart_port *port;
- uint8_t iir, lsr;
-
- port = (struct uart_port *)dev_id;
-
- iir = siu_read(port, UART_IIR);
- if (iir & UART_IIR_NO_INT)
- return IRQ_NONE;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_DR)
- receive_chars(port, &lsr);
-
- check_modem_status(port);
-
- if (lsr & UART_LSR_THRE)
- transmit_chars(port);
-
- return IRQ_HANDLED;
-}
-
-static int siu_startup(struct uart_port *port)
-{
- int retval;
-
- if (port->membase == NULL)
- return -ENODEV;
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- if (siu_read(port, UART_LSR) == 0xff)
- return -ENODEV;
-
- retval = request_irq(port->irq, siu_interrupt, 0, siu_type_name(port), port);
- if (retval)
- return retval;
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_enable_dsiuint(DSIUINT_ALL);
-
- siu_write(port, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irq(&port->lock);
- siu_set_mctrl(port, port->mctrl);
- spin_unlock_irq(&port->lock);
-
- siu_write(port, UART_IER, UART_IER_RLSI | UART_IER_RDI);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- return 0;
-}
-
-static void siu_shutdown(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t lcr;
-
- siu_write(port, UART_IER, 0);
-
- spin_lock_irqsave(&port->lock, flags);
-
- port->mctrl &= ~TIOCM_OUT2;
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_RX);
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_disable_dsiuint(DSIUINT_ALL);
-
- free_irq(port->irq, port);
-}
-
-static void siu_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
-{
- tcflag_t c_cflag, c_iflag;
- uint8_t lcr, fcr, ier;
- unsigned int baud, quot;
- unsigned long flags;
-
- c_cflag = new->c_cflag;
- lcr = UART_LCR_WLEN(tty_get_char_size(c_cflag));
-
- if (c_cflag & CSTOPB)
- lcr |= UART_LCR_STOP;
- if (c_cflag & PARENB)
- lcr |= UART_LCR_PARITY;
- if ((c_cflag & PARODD) != PARODD)
- lcr |= UART_LCR_EPAR;
- if (c_cflag & CMSPAR)
- lcr |= UART_LCR_SPAR;
-
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
-
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
-
- spin_lock_irqsave(&port->lock, flags);
-
- uart_update_timeout(port, c_cflag, baud);
-
- c_iflag = new->c_iflag;
-
- port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
- if (c_iflag & INPCK)
- port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_LSR_BI;
-
- port->ignore_status_mask = 0;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & IGNBRK) {
- port->ignore_status_mask |= UART_LSR_BI;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_OE;
- }
-
- if ((c_cflag & CREAD) == 0)
- port->ignore_status_mask |= UART_LSR_DR;
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(port, c_cflag))
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- siu_write(port, UART_LCR, lcr | UART_LCR_DLAB);
-
- siu_write(port, UART_DLL, (uint8_t)quot);
- siu_write(port, UART_DLM, (uint8_t)(quot >> 8));
-
- siu_write(port, UART_LCR, lcr);
-
- siu_write(port, UART_FCR, fcr);
-
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
-{
- switch (state) {
- case 0:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_supply_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_supply_clock(DSIU_CLOCK);
- break;
- }
- break;
- case 3:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_mask_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_mask_clock(DSIU_CLOCK);
- break;
- }
- break;
- }
-}
-
-static const char *siu_type(struct uart_port *port)
-{
- return siu_type_name(port);
-}
-
-static void siu_release_port(struct uart_port *port)
-{
- unsigned long size;
-
- if (port->flags & UPF_IOREMAP) {
- iounmap(port->membase);
- port->membase = NULL;
- }
-
- size = siu_port_size(port);
- release_mem_region(port->mapbase, size);
-}
-
-static int siu_request_port(struct uart_port *port)
-{
- unsigned long size;
- struct resource *res;
-
- size = siu_port_size(port);
- res = request_mem_region(port->mapbase, size, siu_type_name(port));
- if (res == NULL)
- return -EBUSY;
-
- if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap(port->mapbase, size);
- if (port->membase == NULL) {
- release_resource(res);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static void siu_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE) {
- port->type = siu_check_type(port);
- (void)siu_request_port(port);
- }
-}
-
-static int siu_verify_port(struct uart_port *port, struct serial_struct *serial)
-{
- if (port->type != PORT_VR41XX_SIU && port->type != PORT_VR41XX_DSIU)
- return -EINVAL;
- if (port->irq != serial->irq)
- return -EINVAL;
- if (port->iotype != serial->io_type)
- return -EINVAL;
- if (port->mapbase != (unsigned long)serial->iomem_base)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct uart_ops siu_uart_ops = {
- .tx_empty = siu_tx_empty,
- .set_mctrl = siu_set_mctrl,
- .get_mctrl = siu_get_mctrl,
- .stop_tx = siu_stop_tx,
- .start_tx = siu_start_tx,
- .stop_rx = siu_stop_rx,
- .enable_ms = siu_enable_ms,
- .break_ctl = siu_break_ctl,
- .startup = siu_startup,
- .shutdown = siu_shutdown,
- .set_termios = siu_set_termios,
- .pm = siu_pm,
- .type = siu_type,
- .release_port = siu_release_port,
- .request_port = siu_request_port,
- .config_port = siu_config_port,
- .verify_port = siu_verify_port,
-};
-
-static int siu_init_ports(struct platform_device *pdev)
-{
- struct uart_port *port;
- struct resource *res;
- int *type = dev_get_platdata(&pdev->dev);
- int i;
-
- if (!type)
- return 0;
-
- port = siu_uart_ports;
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port->type = type[i];
- if (port->type == PORT_UNKNOWN)
- continue;
- port->irq = platform_get_irq(pdev, i);
- port->uartclk = SIU_BAUD_BASE * 16;
- port->fifosize = 16;
- port->regshift = 0;
- port->iotype = UPIO_MEM;
- port->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
- port->line = i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- port->mapbase = res->start;
- port++;
- }
-
- return i;
-}
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-static void wait_for_xmitr(struct uart_port *port)
-{
- int timeout = 10000;
- uint8_t lsr, msr;
-
- do {
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_BI)
- lsr_break_flag[port->line] = UART_LSR_BI;
-
- if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
- break;
- } while (timeout-- > 0);
-
- if (port->flags & UPF_CONS_FLOW) {
- timeout = 1000000;
-
- do {
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_CTS) != 0)
- break;
- } while (timeout-- > 0);
- }
-}
-
-static void siu_console_putchar(struct uart_port *port, unsigned char ch)
-{
- wait_for_xmitr(port);
- siu_write(port, UART_TX, ch);
-}
-
-static void siu_console_write(struct console *con, const char *s, unsigned count)
-{
- struct uart_port *port;
- uint8_t ier;
-
- port = &siu_uart_ports[con->index];
-
- ier = siu_read(port, UART_IER);
- siu_write(port, UART_IER, 0);
-
- uart_console_write(port, s, count, siu_console_putchar);
-
- wait_for_xmitr(port);
- siu_write(port, UART_IER, ier);
-}
-
-static int __init siu_console_setup(struct console *con, char *options)
-{
- struct uart_port *port;
- int baud = 9600;
- int parity = 'n';
- int bits = 8;
- int flow = 'n';
-
- if (con->index >= SIU_PORTS_MAX)
- con->index = 0;
-
- port = &siu_uart_ports[con->index];
- if (port->membase == NULL) {
- if (port->mapbase == 0)
- return -ENODEV;
- port->membase = ioremap(port->mapbase, siu_port_size(port));
- }
-
- if (port->type == PORT_VR41XX_SIU)
- vr41xx_select_siu_interface(SIU_INTERFACE_RS232C);
-
- if (options != NULL)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, con, baud, parity, bits, flow);
-}
-
-static struct uart_driver siu_uart_driver;
-
-static struct console siu_console = {
- .name = "ttyVR",
- .write = siu_console_write,
- .device = uart_console_device,
- .setup = siu_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &siu_uart_driver,
-};
-
-static int siu_console_init(void)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- }
-
- register_console(&siu_console);
-
- return 0;
-}
-
-console_initcall(siu_console_init);
-
-void __init vr41xx_siu_early_setup(struct uart_port *port)
-{
- if (port->type == PORT_UNKNOWN)
- return;
-
- siu_uart_ports[port->line].line = port->line;
- siu_uart_ports[port->line].type = port->type;
- siu_uart_ports[port->line].uartclk = SIU_BAUD_BASE * 16;
- siu_uart_ports[port->line].mapbase = port->mapbase;
- siu_uart_ports[port->line].ops = &siu_uart_ops;
-}
-
-#define SERIAL_VR41XX_CONSOLE &siu_console
-#else
-#define SERIAL_VR41XX_CONSOLE NULL
-#endif
-
-static struct uart_driver siu_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "SIU",
- .dev_name = "ttyVR",
- .major = SIU_MAJOR,
- .minor = SIU_MINOR_BASE,
- .cons = SERIAL_VR41XX_CONSOLE,
-};
-
-static int siu_probe(struct platform_device *dev)
-{
- struct uart_port *port;
- int num, i, retval;
-
- num = siu_init_ports(dev);
- if (num <= 0)
- return -ENODEV;
-
- siu_uart_driver.nr = num;
- retval = uart_register_driver(&siu_uart_driver);
- if (retval)
- return retval;
-
- for (i = 0; i < num; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- port->dev = &dev->dev;
- port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_VR41XX_CONSOLE);
-
- retval = uart_add_one_port(&siu_uart_driver, port);
- if (retval < 0) {
- port->dev = NULL;
- break;
- }
- }
-
- if (i == 0 && retval < 0) {
- uart_unregister_driver(&siu_uart_driver);
- return retval;
- }
-
- return 0;
-}
-
-static int siu_remove(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if (port->dev == &dev->dev) {
- uart_remove_one_port(&siu_uart_driver, port);
- port->dev = NULL;
- }
- }
-
- uart_unregister_driver(&siu_uart_driver);
-
- return 0;
-}
-
-static int siu_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_suspend_port(&siu_uart_driver, port);
-
- }
-
- return 0;
-}
-
-static int siu_resume(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_resume_port(&siu_uart_driver, port);
- }
-
- return 0;
-}
-
-static struct platform_driver siu_device_driver = {
- .probe = siu_probe,
- .remove = siu_remove,
- .suspend = siu_suspend,
- .resume = siu_resume,
- .driver = {
- .name = "SIU",
- },
-};
-
-module_platform_driver(siu_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 595d8b49c745..9fdecc795b6b 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/minmax.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
@@ -104,6 +105,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
p->size = size;
p->next = NULL;
p->commit = 0;
+ p->lookahead = 0;
p->read = 0;
p->flags = 0;
}
@@ -234,6 +236,7 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
buf->head = next;
}
buf->head->read = buf->head->commit;
+ buf->head->lookahead = buf->head->read;
if (ld && ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
@@ -276,13 +279,15 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
if (n != NULL) {
n->flags = flags;
buf->tail = n;
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs();
+ * ensures they see all buffer data.
*/
smp_store_release(&b->commit, b->used);
- /* paired w/ acquire in flush_to_ldisc(); ensures the
- * latest commit value can be read before the head is
- * advanced to the next buffer
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
+ * ensures the latest commit value can be read before the head
+ * is advanced to the next buffer.
*/
smp_store_release(&b->next, n);
} else if (change)
@@ -459,6 +464,40 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
+static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
+{
+ head->lookahead = max(head->lookahead, head->read);
+
+ while (head) {
+ struct tty_buffer *next;
+ unsigned char *p, *f = NULL;
+ unsigned int count;
+
+ /*
+ * Paired w/ release in __tty_buffer_request_room();
+ * ensures commit value read is not stale if the head
+ * is advancing to the next buffer.
+ */
+ next = smp_load_acquire(&head->next);
+ /*
+ * Paired w/ release in __tty_buffer_request_room() or in
+ * tty_buffer_flush(); ensures we see the committed buffer data.
+ */
+ count = smp_load_acquire(&head->commit) - head->lookahead;
+ if (!count) {
+ head = next;
+ continue;
+ }
+
+ p = char_buf_ptr(head, head->lookahead);
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->lookahead);
+
+ port->client_ops->lookahead_buf(port, p, f, count);
+ head->lookahead += count;
+ }
+}
+
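An illustrative reduction of the release/acquire pairing relied on above (commentary only, not part of the patch):

    /*
     *  producer (__tty_buffer_request_room)     consumer (lookahead_bufs)
     *  ------------------------------------     -------------------------
     *  ...fill char_buf up to b->used...
     *  smp_store_release(&b->commit, b->used);
     *                                            c = smp_load_acquire(&head->commit);
     *                                            ...every write before the release
     *                                            is now visible up to offset c...
     */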
static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
@@ -496,7 +535,7 @@ static void flush_to_ldisc(struct work_struct *work)
while (1) {
struct tty_buffer *head = buf->head;
struct tty_buffer *next;
- int count;
+ int count, rcvd;
/* Ldisc or user is trying to gain exclusive access */
if (atomic_read(&buf->priority))
@@ -519,10 +558,12 @@ static void flush_to_ldisc(struct work_struct *work)
continue;
}
- count = receive_buf(port, head, count);
- if (!count)
+ rcvd = receive_buf(port, head, count);
+ head->read += rcvd;
+ if (rcvd < count)
+ lookahead_bufs(port, head);
+ if (!rcvd)
break;
- head->read += count;
if (need_resched())
cond_resched();
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8fec1d8648f5..82a8855981f7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1663,7 +1663,7 @@ void tty_kclose(struct tty_struct *tty)
*/
tty_ldisc_release(tty);
- /* Wait for pending work before tty destruction commmences */
+ /* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
tty_debug_hangup(tty, "freeing structure\n");
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index adae687f654b..2a76b330e108 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -319,6 +319,8 @@ unsigned char tty_get_frame_size(unsigned int cflag)
bits++;
if (cflag & PARENB)
bits++;
+ if (cflag & ADDRB)
+ bits++;
return bits;
}
@@ -353,6 +355,8 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
old_termios = tty->termios;
tty->termios = *new_termios;
unset_locked_termios(tty, &old_termios);
+ /* Reset any ADDRB changes, ADDRB is changed through ->rs485_config() */
+ tty->termios.c_cflag ^= (tty->termios.c_cflag ^ old_termios.c_cflag) & ADDRB;
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
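The ADDRB line added above uses the usual bit-merge idiom; spelled out for illustration:

    /*
     *  a ^= (a ^ b) & mask;
     *
     * copies exactly the bits selected by mask from b into a and leaves the
     * rest of a untouched. Here a is the new c_cflag, b the old one and mask
     * is ADDRB, so a termios update cannot flip ADDRB; only the driver's
     * ->rs485_config() path may change it.
     */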
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 880608a65773..dce08a6d7b5e 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -43,6 +43,26 @@ static int tty_port_default_receive_buf(struct tty_port *port,
return ret;
}
+static void tty_port_default_lookahead_buf(struct tty_port *port, const unsigned char *p,
+ const unsigned char *f, unsigned int count)
+{
+ struct tty_struct *tty;
+ struct tty_ldisc *disc;
+
+ tty = READ_ONCE(port->itty);
+ if (!tty)
+ return;
+
+ disc = tty_ldisc_ref(tty);
+ if (!disc)
+ return;
+
+ if (disc->ops->lookahead_buf)
+ disc->ops->lookahead_buf(disc->tty, p, f, count);
+
+ tty_ldisc_deref(disc);
+}
+
static void tty_port_default_wakeup(struct tty_port *port)
{
struct tty_struct *tty = tty_port_tty_get(port);
@@ -55,6 +75,7 @@ static void tty_port_default_wakeup(struct tty_port *port)
const struct tty_port_client_operations tty_port_default_client_ops = {
.receive_buf = tty_port_default_receive_buf,
+ .lookahead_buf = tty_port_default_lookahead_buf,
.write_wakeup = tty_port_default_wakeup,
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index fe30ce512819..b3dfe9d5717e 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -30,6 +30,6 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c
ifdef GENERATE_KEYMAP
$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
- loadkeys --mktable $< > $@
+ loadkeys --mktable --unicode $< > $@
endif
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index d815ac98b39e..f02d21e2a96e 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -23,6 +23,8 @@
* stack overflow.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kd.h>
#include <linux/errno.h>
@@ -36,9 +38,9 @@
#include <linux/vt_kern.h>
#include <linux/string.h>
-static unsigned short translations[][256] = {
+static unsigned short translations[][E_TABSZ] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
- {
+ [LAT1_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -71,9 +73,9 @@ static unsigned short translations[][256] = {
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
- },
+ },
/* VT100 graphics mapped to Unicode */
- {
+ [GRAF_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -108,8 +110,8 @@ static unsigned short translations[][256] = {
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* IBM Codepage 437 mapped to Unicode */
- {
- 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
+ [IBMPC_MAP] = {
+ 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b, 0x263c,
0x25b6, 0x25c0, 0x2195, 0x203c, 0x00b6, 0x00a7, 0x25ac, 0x21a8,
0x2191, 0x2193, 0x2192, 0x2190, 0x221f, 0x2194, 0x25b2, 0x25bc,
@@ -141,9 +143,9 @@ static unsigned short translations[][256] = {
0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0
- },
+ },
/* User mapping -- default to codes for direct font mapping */
- {
+ [USER_MAP] = {
0xf000, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007,
0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f,
0xf010, 0xf011, 0xf012, 0xf013, 0xf014, 0xf015, 0xf016, 0xf017,
@@ -184,78 +186,105 @@ static unsigned short translations[][256] = {
#define MAX_GLYPH 512 /* Max possible glyph value */
-static int inv_translate[MAX_NR_CONSOLES];
+static enum translation_map inv_translate[MAX_NR_CONSOLES];
+
+#define UNI_DIRS 32U
+#define UNI_DIR_ROWS 32U
+#define UNI_ROW_GLYPHS 64U
+
+#define UNI_DIR_BITS GENMASK(15, 11)
+#define UNI_ROW_BITS GENMASK(10, 6)
+#define UNI_GLYPH_BITS GENMASK( 5, 0)
-struct uni_pagedir {
- u16 **uni_pgdir[32];
+#define UNI_DIR(uni) FIELD_GET(UNI_DIR_BITS, (uni))
+#define UNI_ROW(uni) FIELD_GET(UNI_ROW_BITS, (uni))
+#define UNI_GLYPH(uni) FIELD_GET(UNI_GLYPH_BITS, (uni))
+
+#define UNI(dir, row, glyph) (FIELD_PREP(UNI_DIR_BITS, (dir)) | \
+ FIELD_PREP(UNI_ROW_BITS, (row)) | \
+ FIELD_PREP(UNI_GLYPH_BITS, (glyph)))
+
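A worked example of the 5/5/6-bit split the macros encode (value chosen for illustration):

    /*
     *  0x1234 = 0b 00010 01000 110100
     *               dir   row   glyph
     *
     *  UNI_DIR(0x1234)   == 0x02
     *  UNI_ROW(0x1234)   == 0x08
     *  UNI_GLYPH(0x1234) == 0x34
     *  UNI(0x02, 0x08, 0x34) == 0x1234
     */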
+/**
+ * struct uni_pagedict -- unicode directory
+ *
+ * @uni_pgdir: 32*32*64 table with glyphs
+ * @refcount: reference count of this structure
+ * @sum: checksum
+ * @inverse_translations: best-effort inverse mapping
+ * @inverse_trans_unicode: best-effort inverse mapping to unicode
+ */
+struct uni_pagedict {
+ u16 **uni_pgdir[UNI_DIRS];
unsigned long refcount;
unsigned long sum;
- unsigned char *inverse_translations[4];
+ unsigned char *inverse_translations[LAST_MAP + 1];
u16 *inverse_trans_unicode;
};
-static struct uni_pagedir *dflt;
+static struct uni_pagedict *dflt;
-static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int i)
+static void set_inverse_transl(struct vc_data *conp, struct uni_pagedict *dict,
+ enum translation_map m)
{
- int j, glyph;
- unsigned short *t = translations[i];
- unsigned char *q;
-
- if (!p) return;
- q = p->inverse_translations[i];
-
- if (!q) {
- q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL);
- if (!q) return;
+ unsigned short *t = translations[m];
+ unsigned char *inv;
+
+ if (!dict)
+ return;
+ inv = dict->inverse_translations[m];
+
+ if (!inv) {
+ inv = dict->inverse_translations[m] = kmalloc(MAX_GLYPH,
+ GFP_KERNEL);
+ if (!inv)
+ return;
}
- memset(q, 0, MAX_GLYPH);
+ memset(inv, 0, MAX_GLYPH);
- for (j = 0; j < E_TABSZ; j++) {
- glyph = conv_uni_to_pc(conp, t[j]);
- if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) {
+ for (unsigned int ch = 0; ch < ARRAY_SIZE(translations[m]); ch++) {
+ int glyph = conv_uni_to_pc(conp, t[ch]);
+ if (glyph >= 0 && glyph < MAX_GLYPH && inv[glyph] < 32) {
/* prefer '-' above SHY etc. */
- q[glyph] = j;
+ inv[glyph] = ch;
}
}
}
-static void set_inverse_trans_unicode(struct vc_data *conp,
- struct uni_pagedir *p)
+static void set_inverse_trans_unicode(struct uni_pagedict *dict)
{
- int i, j, k, glyph;
- u16 **p1, *p2;
- u16 *q;
-
- if (!p) return;
- q = p->inverse_trans_unicode;
- if (!q) {
- q = p->inverse_trans_unicode =
- kmalloc_array(MAX_GLYPH, sizeof(u16), GFP_KERNEL);
- if (!q)
+ unsigned int d, r, g;
+ u16 *inv;
+
+ if (!dict)
+ return;
+
+ inv = dict->inverse_trans_unicode;
+ if (!inv) {
+ inv = dict->inverse_trans_unicode = kmalloc_array(MAX_GLYPH,
+ sizeof(*inv), GFP_KERNEL);
+ if (!inv)
return;
}
- memset(q, 0, MAX_GLYPH * sizeof(u16));
+ memset(inv, 0, MAX_GLYPH * sizeof(*inv));
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (!p1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
continue;
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (!p2)
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
continue;
- for (k = 0; k < 64; k++) {
- glyph = p2[k];
- if (glyph >= 0 && glyph < MAX_GLYPH
- && q[glyph] < 32)
- q[glyph] = (i << 11) + (j << 6) + k;
+ for (g = 0; g < UNI_ROW_GLYPHS; g++) {
+ u16 glyph = row[g];
+ if (glyph < MAX_GLYPH && inv[glyph] < 32)
+ inv[glyph] = UNI(d, r, g);
}
}
}
}
-unsigned short *set_translate(int m, struct vc_data *vc)
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc)
{
inv_translate[vc->vc_num] = m;
return translations[m];
@@ -268,44 +297,45 @@ unsigned short *set_translate(int m, struct vc_data *vc)
* was active.
* Still, it is now possible to a certain extent to cut and paste non-ASCII.
*/
-u16 inverse_translate(const struct vc_data *conp, int glyph, int use_unicode)
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode)
{
- struct uni_pagedir *p;
- int m;
- if (glyph < 0 || glyph >= MAX_GLYPH)
+ struct uni_pagedict *p;
+ enum translation_map m;
+
+ if (glyph >= MAX_GLYPH)
return 0;
- else {
- p = *conp->vc_uni_pagedir_loc;
- if (!p)
+
+ p = *conp->uni_pagedict_loc;
+ if (!p)
+ return glyph;
+
+ if (use_unicode) {
+ if (!p->inverse_trans_unicode)
return glyph;
- else if (use_unicode) {
- if (!p->inverse_trans_unicode)
- return glyph;
- else
- return p->inverse_trans_unicode[glyph];
- } else {
- m = inv_translate[conp->vc_num];
- if (!p->inverse_translations[m])
- return glyph;
- else
- return p->inverse_translations[m][glyph];
- }
+
+ return p->inverse_trans_unicode[glyph];
}
+
+ m = inv_translate[conp->vc_num];
+ if (!p->inverse_translations[m])
+ return glyph;
+
+ return p->inverse_translations[m][glyph];
}
EXPORT_SYMBOL_GPL(inverse_translate);
static void update_user_maps(void)
{
int i;
- struct uni_pagedir *p, *q = NULL;
-
+ struct uni_pagedict *p, *q = NULL;
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
- p = *vc_cons[i].d->vc_uni_pagedir_loc;
+ p = *vc_cons[i].d->uni_pagedict_loc;
if (p && p != q) {
set_inverse_transl(vc_cons[i].d, p, USER_MAP);
- set_inverse_trans_unicode(vc_cons[i].d, p);
+ set_inverse_trans_unicode(p);
q = p;
}
}
@@ -321,15 +351,15 @@ static void update_user_maps(void)
*/
int con_set_trans_old(unsigned char __user * arg)
{
- int i;
unsigned short inbuf[E_TABSZ];
- unsigned char ubuf[E_TABSZ];
-
- if (copy_from_user(ubuf, arg, E_TABSZ))
- return -EFAULT;
+ unsigned int i;
+ unsigned char ch;
- for (i = 0; i < E_TABSZ ; i++)
- inbuf[i] = UNI_DIRECT_BASE | ubuf[i];
+ for (i = 0; i < ARRAY_SIZE(inbuf); i++) {
+ if (get_user(ch, &arg[i]))
+ return -EFAULT;
+ inbuf[i] = UNI_DIRECT_BASE | ch;
+ }
console_lock();
memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
@@ -345,7 +375,7 @@ int con_get_trans_old(unsigned char __user * arg)
unsigned char outbuf[E_TABSZ];
console_lock();
- for (i = 0; i < E_TABSZ ; i++)
+ for (i = 0; i < ARRAY_SIZE(outbuf); i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
outbuf[i] = (ch & ~0xff) ? 0 : ch;
@@ -381,7 +411,7 @@ int con_get_trans_new(ushort __user * arg)
}
/*
- * Unicode -> current font conversion
+ * Unicode -> current font conversion
*
* A font has at most 512 chars, usually 256.
* But one font position may represent several Unicode chars.
@@ -393,78 +423,82 @@ int con_get_trans_new(ushort __user * arg)
extern u8 dfont_unicount[]; /* Defined in console_defmap.c */
extern u16 dfont_unitable[];
-static void con_release_unimap(struct uni_pagedir *p)
+static void con_release_unimap(struct uni_pagedict *dict)
{
- u16 **p1;
- int i, j;
-
- if (p == dflt) dflt = NULL;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1 != NULL) {
- for (j = 0; j < 32; j++)
- kfree(p1[j]);
- kfree(p1);
+ unsigned int d, r;
+
+ if (dict == dflt)
+ dflt = NULL;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (dir != NULL) {
+ for (r = 0; r < UNI_DIR_ROWS; r++)
+ kfree(dir[r]);
+ kfree(dir);
}
- p->uni_pgdir[i] = NULL;
+ dict->uni_pgdir[d] = NULL;
}
- for (i = 0; i < 4; i++) {
- kfree(p->inverse_translations[i]);
- p->inverse_translations[i] = NULL;
+
+ for (r = 0; r < ARRAY_SIZE(dict->inverse_translations); r++) {
+ kfree(dict->inverse_translations[r]);
+ dict->inverse_translations[r] = NULL;
}
- kfree(p->inverse_trans_unicode);
- p->inverse_trans_unicode = NULL;
+
+ kfree(dict->inverse_trans_unicode);
+ dict->inverse_trans_unicode = NULL;
}
/* Caller must hold the console lock */
void con_free_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
- p = *vc->vc_uni_pagedir_loc;
+ p = *vc->uni_pagedict_loc;
if (!p)
return;
- *vc->vc_uni_pagedir_loc = NULL;
+ *vc->uni_pagedict_loc = NULL;
if (--p->refcount)
return;
con_release_unimap(p);
kfree(p);
}
-
-static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
+
+static int con_unify_unimap(struct vc_data *conp, struct uni_pagedict *dict1)
{
- int i, j, k;
- struct uni_pagedir *q;
-
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- if (!vc_cons_allocated(i))
+ struct uni_pagedict *dict2;
+ unsigned int cons, d, r;
+
+ for (cons = 0; cons < MAX_NR_CONSOLES; cons++) {
+ if (!vc_cons_allocated(cons))
continue;
- q = *vc_cons[i].d->vc_uni_pagedir_loc;
- if (!q || q == p || q->sum != p->sum)
+ dict2 = *vc_cons[cons].d->uni_pagedict_loc;
+ if (!dict2 || dict2 == dict1 || dict2->sum != dict1->sum)
continue;
- for (j = 0; j < 32; j++) {
- u16 **p1, **q1;
- p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
- if (!p1 && !q1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir1 = dict1->uni_pgdir[d];
+ u16 **dir2 = dict2->uni_pgdir[d];
+ if (!dir1 && !dir2)
continue;
- if (!p1 || !q1)
+ if (!dir1 || !dir2)
break;
- for (k = 0; k < 32; k++) {
- if (!p1[k] && !q1[k])
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ if (!dir1[r] && !dir2[r])
continue;
- if (!p1[k] || !q1[k])
+ if (!dir1[r] || !dir2[r])
break;
- if (memcmp(p1[k], q1[k], 64*sizeof(u16)))
+ if (memcmp(dir1[r], dir2[r], UNI_ROW_GLYPHS *
+ sizeof(*dir1[r])))
break;
}
- if (k < 32)
+ if (r < UNI_DIR_ROWS)
break;
}
- if (j == 32) {
- q->refcount++;
- *conp->vc_uni_pagedir_loc = q;
- con_release_unimap(p);
- kfree(p);
+ if (d == UNI_DIRS) {
+ dict2->refcount++;
+ *conp->uni_pagedict_loc = dict2;
+ con_release_unimap(dict1);
+ kfree(dict1);
return 1;
}
}
@@ -472,55 +506,66 @@ static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
}
static int
-con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
+con_insert_unipair(struct uni_pagedict *p, u_short unicode, u_short fontpos)
{
- int i, n;
- u16 **p1, *p2;
-
- p1 = p->uni_pgdir[n = unicode >> 11];
- if (!p1) {
- p1 = p->uni_pgdir[n] = kmalloc_array(32, sizeof(u16 *),
- GFP_KERNEL);
- if (!p1) return -ENOMEM;
- for (i = 0; i < 32; i++)
- p1[i] = NULL;
+ u16 **dir, *row;
+ unsigned int n;
+
+ n = UNI_DIR(unicode);
+ dir = p->uni_pgdir[n];
+ if (!dir) {
+ dir = p->uni_pgdir[n] = kcalloc(UNI_DIR_ROWS, sizeof(*dir),
+ GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
}
- p2 = p1[n = (unicode >> 6) & 0x1f];
- if (!p2) {
- p2 = p1[n] = kmalloc_array(64, sizeof(u16), GFP_KERNEL);
- if (!p2) return -ENOMEM;
- memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
+ n = UNI_ROW(unicode);
+ row = dir[n];
+ if (!row) {
+ row = dir[n] = kmalloc_array(UNI_ROW_GLYPHS, sizeof(*row),
+ GFP_KERNEL);
+ if (!row)
+ return -ENOMEM;
+ /* No glyphs for the characters (yet) */
+ memset(row, 0xff, UNI_ROW_GLYPHS * sizeof(*row));
}
- p2[unicode & 0x3f] = fontpos;
-
+ row[UNI_GLYPH(unicode)] = fontpos;
+
p->sum += (fontpos << 20U) + unicode;
return 0;
}
+static int con_allocate_new(struct vc_data *vc)
+{
+ struct uni_pagedict *new, *old = *vc->uni_pagedict_loc;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ new->refcount = 1;
+ *vc->uni_pagedict_loc = new;
+
+ if (old)
+ old->refcount--;
+
+ return 0;
+}
+
/* Caller must hold the lock */
static int con_do_clear_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p, *q;
-
- p = *vc->vc_uni_pagedir_loc;
- if (!p || --p->refcount) {
- q = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!q) {
- if (p)
- p->refcount++;
- return -ENOMEM;
- }
- q->refcount=1;
- *vc->vc_uni_pagedir_loc = q;
- } else {
- if (p == dflt) dflt = NULL;
- p->refcount++;
- p->sum = 0;
- con_release_unimap(p);
- }
+ struct uni_pagedict *old = *vc->uni_pagedict_loc;
+
+ if (!old || old->refcount > 1)
+ return con_allocate_new(vc);
+
+ old->sum = 0;
+ con_release_unimap(old);
+
return 0;
}
@@ -532,91 +577,93 @@ int con_clear_unimap(struct vc_data *vc)
console_unlock();
return ret;
}
-
+
+static struct uni_pagedict *con_unshare_unimap(struct vc_data *vc,
+ struct uni_pagedict *old)
+{
+ struct uni_pagedict *new;
+ unsigned int d, r, g;
+ int ret;
+ u16 uni = 0;
+
+ ret = con_allocate_new(vc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ new = *vc->uni_pagedict_loc;
+
+ /*
+ * uni_pgdir is a 32*32*64 table with rows allocated when its first
+ * entry is added. The unicode value must still be incremented for
+ * empty rows. We are copying entries from "old" to "new".
+ */
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = old->uni_pgdir[d];
+ if (!dir) {
+ /* Account for empty table */
+ uni += UNI_DIR_ROWS * UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row) {
+ /* Account for row of 64 empty entries */
+ uni += UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, uni++) {
+ if (row[g] == 0xffff)
+ continue;
+ /*
+ * Found one, copy entry for unicode uni with
+ * fontpos value row[g].
+ */
+ ret = con_insert_unipair(new, uni, row[g]);
+ if (ret) {
+ old->refcount++;
+ *vc->uni_pagedict_loc = old;
+ con_release_unimap(new);
+ kfree(new);
+ return ERR_PTR(ret);
+ }
+ }
+ }
+ }
+
+ return new;
+}
+
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
- int err = 0, err1, i;
- struct uni_pagedir *p, *q;
+ int err = 0, err1;
+ struct uni_pagedict *dict;
struct unipair *unilist, *plist;
if (!ct)
return 0;
- unilist = vmemdup_user(list, array_size(sizeof(struct unipair), ct));
+ unilist = vmemdup_user(list, array_size(sizeof(*unilist), ct));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
console_lock();
	/* Save original uni_pagedict_loc in case we allocate a new one */
- p = *vc->vc_uni_pagedir_loc;
-
- if (!p) {
+ dict = *vc->uni_pagedict_loc;
+ if (!dict) {
err = -EINVAL;
-
goto out_unlock;
}
-
- if (p->refcount > 1) {
- int j, k;
- u16 **p1, *p2, l;
-
- err1 = con_do_clear_unimap(vc);
- if (err1) {
- err = err1;
+
+ if (dict->refcount > 1) {
+ dict = con_unshare_unimap(vc, dict);
+ if (IS_ERR(dict)) {
+ err = PTR_ERR(dict);
goto out_unlock;
}
-
- /*
- * Since refcount was > 1, con_clear_unimap() allocated a
- * a new uni_pagedir for this vc. Re: p != q
- */
- q = *vc->vc_uni_pagedir_loc;
-
- /*
- * uni_pgdir is a 32*32*64 table with rows allocated
- * when its first entry is added. The unicode value must
- * still be incremented for empty rows. We are copying
- * entries from "p" (old) to "q" (new).
- */
- l = 0; /* unicode value */
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (p2) {
- for (k = 0; k < 64; k++, l++)
- if (p2[k] != 0xffff) {
- /*
- * Found one, copy entry for unicode
- * l with fontpos value p2[k].
- */
- err1 = con_insert_unipair(q, l, p2[k]);
- if (err1) {
- p->refcount++;
- *vc->vc_uni_pagedir_loc = p;
- con_release_unimap(q);
- kfree(q);
- err = err1;
- goto out_unlock;
- }
- }
- } else {
- /* Account for row of 64 empty entries */
- l += 64;
- }
- }
- else
- /* Account for empty table */
- l += 32 * 64;
- }
-
- /*
- * Finished copying font table, set vc_uni_pagedir to new table
- */
- p = q;
- } else if (p == dflt) {
+ } else if (dict == dflt) {
dflt = NULL;
}
@@ -624,20 +671,20 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
* Insert user specified unicode pairs into new table.
*/
for (plist = unilist; ct; ct--, plist++) {
- err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
+ err1 = con_insert_unipair(dict, plist->unicode, plist->fontpos);
if (err1)
err = err1;
}
-
+
/*
* Merge with fontmaps of any other virtual consoles.
*/
- if (con_unify_unimap(vc, p))
+ if (con_unify_unimap(vc, dict))
goto out_unlock;
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update inverse translations */
- set_inverse_trans_unicode(vc, p);
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
out_unlock:
console_unlock();
@@ -652,55 +699,56 @@ out_unlock:
* Loads the unimap for the hardware font, as defined in uni_hash.tbl.
* The representation used was the most compact I could come up
* with. This routine is executed at video setup, and when the
- * PIO_FONTRESET ioctl is called.
+ * PIO_FONTRESET ioctl is called.
*
* The caller must hold the console lock
*/
int con_set_default_unimap(struct vc_data *vc)
{
- int i, j, err = 0, err1;
- u16 *q;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
+ unsigned int fontpos, count;
+ int err = 0, err1;
+ u16 *dfont;
if (dflt) {
- p = *vc->vc_uni_pagedir_loc;
- if (p == dflt)
+ dict = *vc->uni_pagedict_loc;
+ if (dict == dflt)
return 0;
dflt->refcount++;
- *vc->vc_uni_pagedir_loc = dflt;
- if (p && !--p->refcount) {
- con_release_unimap(p);
- kfree(p);
+ *vc->uni_pagedict_loc = dflt;
+ if (dict && !--dict->refcount) {
+ con_release_unimap(dict);
+ kfree(dict);
}
return 0;
}
-
+
/* The default font is always 256 characters */
err = con_do_clear_unimap(vc);
if (err)
return err;
-
- p = *vc->vc_uni_pagedir_loc;
- q = dfont_unitable;
-
- for (i = 0; i < 256; i++)
- for (j = dfont_unicount[i]; j; j--) {
- err1 = con_insert_unipair(p, *(q++), i);
+
+ dict = *vc->uni_pagedict_loc;
+ dfont = dfont_unitable;
+
+ for (fontpos = 0; fontpos < 256U; fontpos++)
+ for (count = dfont_unicount[fontpos]; count; count--) {
+ err1 = con_insert_unipair(dict, *(dfont++), fontpos);
if (err1)
err = err1;
}
-
- if (con_unify_unimap(vc, p)) {
- dflt = *vc->vc_uni_pagedir_loc;
+
+ if (con_unify_unimap(vc, dict)) {
+ dflt = *vc->uni_pagedict_loc;
return err;
}
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update all inverse translations */
- set_inverse_trans_unicode(vc, p);
- dflt = p;
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
+ dflt = dict;
return err;
}
EXPORT_SYMBOL(con_set_default_unimap);
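The loop in con_set_default_unimap() above decodes a packed pair of generated tables: dfont_unicount[i] holds how many Unicode code points map to font position i, and dfont_unitable is all of those code points concatenated in font-position order. A standalone sketch of the same walk (table contents here are made-up placeholders, not the real generated data):

#include <stdio.h>

static const unsigned char unicount[3] = { 2, 0, 1 };
static const unsigned short unitable[] = { 0x0041, 0x00C4, 0x0042 };

int main(void)
{
	const unsigned short *q = unitable;
	unsigned int fontpos, count;

	for (fontpos = 0; fontpos < 3; fontpos++)
		for (count = unicount[fontpos]; count; count--)
			printf("U+%04X -> glyph %u\n", (unsigned)*q++, fontpos);
	return 0;
}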
@@ -714,16 +762,16 @@ EXPORT_SYMBOL(con_set_default_unimap);
*/
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
{
- struct uni_pagedir *q;
+ struct uni_pagedict *src;
- if (!*src_vc->vc_uni_pagedir_loc)
+ if (!*src_vc->uni_pagedict_loc)
return -EINVAL;
- if (*dst_vc->vc_uni_pagedir_loc == *src_vc->vc_uni_pagedir_loc)
+ if (*dst_vc->uni_pagedict_loc == *src_vc->uni_pagedict_loc)
return 0;
con_free_unimap(dst_vc);
- q = *src_vc->vc_uni_pagedir_loc;
- q->refcount++;
- *dst_vc->vc_uni_pagedir_loc = q;
+ src = *src_vc->uni_pagedict_loc;
+ src->refcount++;
+ *dst_vc->uni_pagedict_loc = src;
return 0;
}
EXPORT_SYMBOL(con_copy_unimap);
@@ -734,46 +782,53 @@ EXPORT_SYMBOL(con_copy_unimap);
* Read the console unicode data for this console. Called from the ioctl
* handlers.
*/
-int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
+int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct,
+ struct unipair __user *list)
{
- int i, j, k, ret = 0;
ushort ect;
- u16 **p1, *p2;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
struct unipair *unilist;
+ unsigned int d, r, g;
+ int ret = 0;
- unilist = kvmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ unilist = kvmalloc_array(ct, sizeof(*unilist), GFP_KERNEL);
if (!unilist)
return -ENOMEM;
console_lock();
ect = 0;
- if (*vc->vc_uni_pagedir_loc) {
- p = *vc->vc_uni_pagedir_loc;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = *(p1++);
- if (p2)
- for (k = 0; k < 64; k++, p2++) {
- if (*p2 >= MAX_GLYPH)
- continue;
- if (ect < ct) {
- unilist[ect].unicode =
- (i<<11)+(j<<6)+k;
- unilist[ect].fontpos = *p2;
- }
- ect++;
+ dict = *vc->uni_pagedict_loc;
+ if (!dict)
+ goto unlock;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
+ continue;
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
+ continue;
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, row++) {
+ if (*row >= MAX_GLYPH)
+ continue;
+ if (ect < ct) {
+ unilist[ect].unicode = UNI(d, r, g);
+ unilist[ect].fontpos = *row;
}
+ ect++;
}
}
}
+unlock:
console_unlock();
- if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair)))
+ if (copy_to_user(list, unilist, min(ect, ct) * sizeof(*unilist)))
+ ret = -EFAULT;
+ if (put_user(ect, uct))
ret = -EFAULT;
- put_user(ect, uct);
kvfree(unilist);
return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
}
@@ -798,20 +853,18 @@ u32 conv_8bit_to_uni(unsigned char c)
int conv_uni_to_8bit(u32 uni)
{
int c;
- for (c = 0; c < 0x100; c++)
+ for (c = 0; c < ARRAY_SIZE(translations[USER_MAP]); c++)
if (translations[USER_MAP][c] == uni ||
(translations[USER_MAP][c] == (c | 0xf000) && uni == c))
return c;
return -1;
}
-int
-conv_uni_to_pc(struct vc_data *conp, long ucs)
+int conv_uni_to_pc(struct vc_data *conp, long ucs)
{
- int h;
- u16 **p1, *p2;
- struct uni_pagedir *p;
-
+ struct uni_pagedict *dict;
+ u16 **dir, *row, glyph;
+
/* Only 16-bit codes supported at this time */
if (ucs > 0xffff)
return -4; /* Not found */
@@ -826,17 +879,24 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
*/
else if ((ucs & ~UNI_DIRECT_MASK) == UNI_DIRECT_BASE)
return ucs & UNI_DIRECT_MASK;
-
- if (!*conp->vc_uni_pagedir_loc)
+
+ dict = *conp->uni_pagedict_loc;
+ if (!dict)
return -3;
- p = *conp->vc_uni_pagedir_loc;
- if ((p1 = p->uni_pgdir[ucs >> 11]) &&
- (p2 = p1[(ucs >> 6) & 0x1f]) &&
- (h = p2[ucs & 0x3f]) < MAX_GLYPH)
- return h;
+ dir = dict->uni_pgdir[UNI_DIR(ucs)];
+ if (!dir)
+ return -4;
- return -4; /* not found */
+ row = dir[UNI_ROW(ucs)];
+ if (!row)
+ return -4;
+
+ glyph = row[UNI_GLYPH(ucs)];
+ if (glyph >= MAX_GLYPH)
+ return -4;
+
+ return glyph;
}
/*
@@ -844,13 +904,13 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
* initialized. It must be possible to call kmalloc(..., GFP_KERNEL)
* from this function, hence the call from sys_setup.
*/
-void __init
+void __init
console_map_init(void)
{
int i;
-
+
for (i = 0; i < MAX_NR_CONSOLES; i++)
- if (vc_cons_allocated(i) && !*vc_cons[i].d->vc_uni_pagedir_loc)
+ if (vc_cons_allocated(i) && !*vc_cons[i].d->uni_pagedict_loc)
con_set_default_unimap(vc_cons[i].d);
}
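The UNI_DIR()/UNI_ROW()/UNI_GLYPH() helpers and the UNI() recombination used throughout this file are defined in a header outside this diff, but the open-coded expressions they replace ('ucs >> 11', '(ucs >> 6) & 0x1f', 'ucs & 0x3f' and '(i<<11)+(j<<6)+k') pin down their shapes. A plausible reconstruction, assuming the constants keep the old literal 32/32/64 dimensions:

/* 16-bit code point split 5+5+6: directory, row, glyph */
#define UNI_DIRS	32U
#define UNI_DIR_ROWS	32U
#define UNI_ROW_GLYPHS	64U

#define UNI_DIR(uni)	((uni) >> 11)		/* top 5 bits */
#define UNI_ROW(uni)	(((uni) >> 6) & 0x1f)	/* middle 5 bits */
#define UNI_GLYPH(uni)	((uni) & 0x3f)		/* low 6 bits */
#define UNI(dir, row, glyph)	(((dir) << 11) | ((row) << 6) | (glyph))

/* Round trip: UNI(UNI_DIR(u), UNI_ROW(u), UNI_GLYPH(u)) == u for u < 0x10000 */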
diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index 094d95bf0005..0c043e4f292e 100644
--- a/drivers/tty/vt/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Do not edit this file! It was automatically generated by */
-/* loadkeys --mktable defkeymap.map > defkeymap.c */
+/* Do not edit this file! It was automatically generated by */
+/* loadkeys --mktable --unicode defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
@@ -139,7 +139,7 @@ static unsigned short ctrl_alt_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-ushort *key_maps[MAX_NR_KEYMAPS] = {
+unsigned short *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, altgr_map, NULL,
ctrl_map, shift_ctrl_map, NULL, NULL,
alt_map, NULL, NULL, NULL,
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index f7755e73696e..6ef22f01cc51 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -68,7 +68,8 @@ sel_pos(int n, bool unicode)
{
if (unicode)
return screen_glyph_unicode(vc_sel.cons, n / 2);
- return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n), 0);
+ return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n),
+ false);
}
/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index dfc1f4b445f3..ae9c926acd6f 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -344,7 +344,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = vmalloc(memsize);
+ p = vzalloc(memsize);
if (!p)
return NULL;
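Switching vmalloc() to vzalloc() means a freshly allocated unicode screen starts out zeroed rather than holding whatever the allocator hands back, presumably to avoid exposing stale memory as screen content. The rest of vc_uniscr_alloc() (outside this hunk) carves the single block sized above into a row-pointer table followed by the glyph payload; a userspace sketch of that conventional layout:

#include <stdint.h>
#include <stdlib.h>

typedef uint32_t c32;	/* stand-in for the kernel's char32_t */

static c32 **uniscr_alloc(unsigned int cols, unsigned int rows)
{
	size_t memsize = cols * rows * sizeof(c32) + rows * sizeof(c32 *);
	c32 **lines = calloc(1, memsize);	/* calloc ~ vzalloc */
	c32 *p;
	unsigned int i;

	if (!lines)
		return NULL;
	/* Row pointers up front, payload right behind them */
	p = (c32 *)(lines + rows);
	for (i = 0; i < rows; i++, p += cols)
		lines[i] = p;
	return lines;
}

int main(void)
{
	c32 **scr = uniscr_alloc(80, 25);

	if (!scr)
		return 1;
	scr[24][79] = 0x2500;	/* fully addressable, zero-initialized */
	free(scr);
	return 0;
}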
@@ -1063,10 +1063,10 @@ static void visual_init(struct vc_data *vc, int num, int init)
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
- if (vc->vc_uni_pagedir_loc)
+ if (vc->uni_pagedict_loc)
con_free_unimap(vc);
- vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
- vc->vc_uni_pagedir = NULL;
+ vc->uni_pagedict_loc = &vc->uni_pagedict;
+ vc->uni_pagedict = NULL;
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
@@ -1136,7 +1136,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
visual_init(vc, currcons, 1);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_set_default_unimap(vc);
err = -EINVAL;
@@ -3939,7 +3939,7 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
bind = con_is_bound(con->con);
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%i\n", bind);
+ return sysfs_emit(buf, "%i\n", bind);
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr,
@@ -3947,7 +3947,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
{
struct con_driver *con = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s %s\n",
+ return sysfs_emit(buf, "%s %s\n",
(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
con->desc);
@@ -4741,7 +4741,7 @@ u32 screen_glyph_unicode(const struct vc_data *vc, int n)
if (uniscr)
return uniscr->lines[n / vc->vc_cols][n % vc->vc_cols];
- return inverse_translate(vc, screen_glyph(vc, n * 2), 1);
+ return inverse_translate(vc, screen_glyph(vc, n * 2), true);
}
EXPORT_SYMBOL_GPL(screen_glyph_unicode);
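show_bind() and show_name() above move from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(), which bakes the single-page sysfs buffer contract into the helper instead of repeating it at every call site. A userspace stand-in for roughly what that contract amounts to (the real helper also sanity-checks that buf is a non-NULL, page-aligned pointer):

#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static int sysfs_emit_demo(char *buf, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, PAGE_SIZE, fmt, args);
	va_end(args);

	/* vscnprintf semantics: report what actually fit in the page */
	return len < 0 ? 0 : (len >= PAGE_SIZE ? PAGE_SIZE - 1 : len);
}

int main(void)
{
	char page[PAGE_SIZE];

	printf("%d\n", sysfs_emit_demo(page, "%s %s\n", "(M)", "dummycon"));
	return 0;
}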
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index ffb01fc6de75..8f67db202d7b 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -215,7 +215,7 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
* ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
@@ -234,8 +234,8 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr);
static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
u16 set, u16 clr)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 2b40174e93ac..a202d7d5240d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -64,9 +64,6 @@
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
-/* Maximum retries for Hibern8 enter */
-#define UIC_HIBERN8_ENTER_RETRIES 3
-
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
@@ -175,7 +172,7 @@ enum {
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
-struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
@@ -363,7 +360,7 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
- struct uic_command *ucmd,
+ const struct uic_command *ucmd,
enum ufs_trace_str_t str_t)
{
u32 cmd;
@@ -443,11 +440,11 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
- char *err_name)
+ const char *err_name)
{
int i;
bool found = false;
- struct ufs_event_hist *e;
+ const struct ufs_event_hist *e;
if (id >= UFS_EVT_CNT)
return;
@@ -497,7 +494,7 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba)
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
- struct ufshcd_lrb *lrbp;
+ const struct ufshcd_lrb *lrbp;
int prdt_length;
int tag;
@@ -553,7 +550,7 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
- struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
+ const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
@@ -1109,7 +1106,7 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
- struct scsi_device *sdev;
+ const struct scsi_device *sdev;
u32 pending = 0;
lockdep_assert_held(hba->host->host_lock);
@@ -2080,14 +2077,15 @@ static inline int ufshcd_monitor_opcode2dir(u8 opcode)
static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
- struct ufs_hba_monitor *m = &hba->monitor;
+ const struct ufs_hba_monitor *m = &hba->monitor;
return (m->enabled && lrbp && lrbp->cmd &&
(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}
-static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_start_monitor(struct ufs_hba *hba,
+ const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
@@ -2098,14 +2096,14 @@ static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
-static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- struct request *req = scsi_cmd_to_rq(lrbp->cmd);
+ const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2227,6 +2225,8 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
+ hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
@@ -3095,7 +3095,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
if (ret)
dev_err(hba->dev,
- "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
return ret;
}
@@ -3263,7 +3263,7 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
if (ret)
dev_err(hba->dev,
- "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+ "%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
return ret;
}
@@ -3834,7 +3834,7 @@ int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
{
int ret;
- if (agreed_gear != UFS_HS_G4)
+ if (agreed_gear < UFS_HS_G4)
adapt_val = PA_NO_ADAPT;
ret = ufshcd_dme_set(hba,
@@ -4123,7 +4123,7 @@ out_unlock:
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
struct uic_command uic_cmd = {0};
int ret;
@@ -4148,6 +4148,7 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
int ufshcd_link_recovery(struct ufs_hba *hba)
{
@@ -4290,8 +4291,13 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FAST_MODE;
- pwr_info->pwr_rx = FAST_MODE;
+ if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
+ pwr_info->pwr_tx = FASTAUTO_MODE;
+ pwr_info->pwr_rx = FASTAUTO_MODE;
+ } else {
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
+ }
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -4766,7 +4772,7 @@ link_startup:
* but we can't be sure if the link is up until link startup
* succeeds. So reset the local Uni-Pro and try again.
*/
- if (ret && ufshcd_hba_enable(hba)) {
+ if (ret && retries && ufshcd_hba_enable(hba)) {
ufshcd_update_evt_hist(hba,
UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
@@ -4931,7 +4937,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba,
*
*/
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
- struct scsi_device *sdev)
+ const struct scsi_device *sdev)
{
if (hba->dev_info.f_power_on_wp_en &&
!hba->dev_info.is_lu_power_on_wp) {
@@ -5450,8 +5456,8 @@ int ufshcd_write_ee_control(struct ufs_hba *hba)
return err;
}
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr)
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr)
{
u16 new_mask, ee_ctrl_mask;
int err = 0;
@@ -7388,7 +7394,8 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
*
* Returns calculated max ICC level for specific regulator
*/
-static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
+ const char *buff)
{
int i;
int curr_uA;
@@ -7435,7 +7442,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
* Returns calculated ICC level
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- u8 *desc_buf, int len)
+ const u8 *desc_buf, int len)
{
u32 icc_level = 0;
@@ -7585,7 +7592,7 @@ out:
return ret;
}
-static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u8 lun;
@@ -7656,7 +7663,7 @@ wb_disabled:
hba->caps &= ~UFSHCD_CAP_WB_EN;
}
-static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u32 ext_ufs_feature;
@@ -7890,7 +7897,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
u32 granularity, peer_granularity;
u32 pa_tactivate, peer_pa_tactivate;
u32 pa_tactivate_us, peer_pa_tactivate_us;
- u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+ static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&granularity);
@@ -8007,7 +8014,7 @@ struct ufs_ref_clk {
enum ufs_ref_clk_freq val;
};
-static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
+static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
{38400000, REF_CLK_FREQ_38_4_MHZ},
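The 'ref-clk-freq' parser added further down feeds raw Hz values through ufs_get_bref_clk_from_hz(), whose body is outside this diff; an exact-match scan over this table, returning the documented REF_CLK_FREQ_INVAL fallback on a miss, is the natural shape. A sketch only: the frequency member name (freq_hz) is assumed, since this hunk shows just the .val member:

static enum ufs_ref_clk_freq ufs_hz_to_ref_clk(u32 freq)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ufs_ref_clk_freqs); i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)	/* assumed member */
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}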
@@ -8319,6 +8326,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
+ .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
@@ -8449,7 +8457,7 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
-static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
@@ -8465,6 +8473,7 @@ static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
@@ -8558,6 +8567,19 @@ out:
return ret;
}
+static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
+{
+ u32 freq;
+ int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
+
+ if (ret) {
+		dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property, err = %d", ret);
+ return REF_CLK_FREQ_INVAL;
+ }
+
+ return ufs_get_bref_clk_from_hz(freq);
+}
+
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
int ret = 0;
@@ -8651,6 +8673,9 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
if (err)
goto out_disable_hba_vreg;
+ if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
+ hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
+
err = ufshcd_setup_clocks(hba, true);
if (err)
goto out_disable_hba_vreg;
@@ -8716,6 +8741,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
+ unsigned long deadline;
+ int32_t remaining;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -8748,9 +8775,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
*/
+ deadline = jiffies + 10 * HZ;
for (retries = 3; retries > 0; --retries) {
+ ret = -ETIMEDOUT;
+ remaining = deadline - jiffies;
+ if (remaining <= 0)
+ break;
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ remaining / HZ, 0, 0, RQF_PM, NULL);
if (!scsi_status_is_check_condition(ret) ||
!scsi_sense_valid(&sshdr) ||
sshdr.sense_key != UNIT_ATTENTION)
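The hunk above caps the combined time spent across up to three START STOP UNIT attempts at ten seconds: each retry's SCSI timeout shrinks to the remaining budget, and the loop bails out with -ETIMEDOUT once the budget is exhausted. The pattern reduced to a standalone sketch:

#include <stdio.h>
#include <time.h>

/* Stand-in for one scsi_execute() attempt; returns 0 on success */
static int try_once(unsigned int timeout_s)
{
	printf("attempt with %us budget\n", timeout_s);
	return -1;	/* pretend it keeps failing */
}

int main(void)
{
	time_t deadline = time(NULL) + 10;	/* overall 10 s budget */
	int retries, ret = -1;

	for (retries = 3; retries > 0; --retries) {
		long remaining = deadline - time(NULL);

		if (remaining <= 0) {
			ret = -1;	/* -ETIMEDOUT in the driver */
			break;
		}
		ret = try_once((unsigned int)remaining);
		if (ret == 0)
			break;
	}
	return ret ? 1 : 0;
}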
@@ -9484,12 +9516,8 @@ EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_shutdown(struct ufs_hba *hba)
{
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
- goto out;
+ ufshcd_suspend(hba);
- pm_runtime_get_sync(hba->dev);
-
- ufshcd_suspend(hba);
-out:
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 82590224da13..4cc2dbd79ed0 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -92,6 +92,18 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+config SCSI_UFS_RENESAS
+ tristate "Renesas specific hooks to UFS controller platform driver"
+ depends on (ARCH_RENESAS || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Renesas specific additions to UFSHCD platform driver.
+ UFS host on Renesas needs some vendor specific configuration before
+ accessing the hardware.
+
+ Select this if you have UFS controller on Renesas chipset.
+
+ If unsure, say N.
+
config SCSI_UFS_TI_J721E
tristate "TI glue layer for Cadence UFS Controller"
depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index e4be54273c98..7717ca93e7d5 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -11,4 +11,5 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index a81d8cbd542f..c3628a8645a5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -52,11 +52,12 @@
#define HCI_ERR_EN_DME_LAYER 0x88
#define HCI_CLKSTOP_CTRL 0xB0
#define REFCLKOUT_STOP BIT(4)
+#define MPHY_APBCLK_STOP BIT(3)
#define REFCLK_STOP BIT(2)
#define UNIPRO_MCLK_STOP BIT(1)
#define UNIPRO_PCLK_STOP BIT(0)
#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
- UNIPRO_MCLK_STOP |\
+ UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\
UNIPRO_PCLK_STOP)
#define HCI_MISC 0xB4
#define REFCLK_CTRL_EN BIT(7)
@@ -135,15 +136,9 @@ enum {
/*
* UNIPRO registers
*/
-#define UNIPRO_COMP_VERSION 0x000
-#define UNIPRO_DME_PWR_REQ 0x090
-#define UNIPRO_DME_PWR_REQ_POWERMODE 0x094
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0
/*
* UFS Protector registers
@@ -157,7 +152,6 @@ enum {
#define CNTR_DIV_VAL 40
-static struct exynos_ufs_drv_data exynos_ufs_drvs;
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
@@ -657,8 +651,9 @@ static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
if (attr->rx_min_actv_time_cap)
ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP,
- i), attr->rx_min_actv_time_cap);
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY, i),
+ attr->rx_min_actv_time_cap);
if (attr->rx_hibern8_time_cap)
ufshcd_dme_set(hba,
@@ -910,9 +905,13 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out_exit_phy;
+ return ret;
}
+ ret = phy_power_on(generic_phy);
+ if (ret)
+ goto out_exit_phy;
+
return 0;
out_exit_phy:
@@ -1174,10 +1173,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
goto out;
}
- ret = phy_power_on(ufs->phy);
- if (ret)
- goto phy_off;
-
exynos_ufs_priv_init(hba, ufs);
if (ufs->drv_data->drv_init) {
@@ -1195,8 +1190,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
exynos_ufs_config_smu(ufs);
return 0;
-phy_off:
- phy_power_off(ufs->phy);
out:
hba->priv = NULL;
return ret;
@@ -1473,7 +1466,100 @@ static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
return 0;
}
-static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
+static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F);
+ }
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+ exynos_ufs_establish_connt(ufs);
+
+ return 0;
+}
+
+static int fsd_ufs_post_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+ u32 hw_cap_min_tactivate;
+ u32 peer_rx_min_actv_time_cap;
+ u32 max_rx_hibern8_time_cap;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4),
+ &hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_rx_min_actv_time_cap); /* PA_TActivate */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ &max_rx_hibern8_time_cap); /* PA_Hibern8Time */
+
+ if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate)
+ ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ peer_rx_min_actv_time_cap + 1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+ return 0;
+}
+
+static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+ unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
+ unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
+ unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
@@ -1514,9 +1600,14 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static int exynos_ufs_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+
return 0;
}
@@ -1545,7 +1636,7 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
.pa_dbg_option_suite = 0x30103,
};
-static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
@@ -1561,7 +1652,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.post_pwr_change = exynosauto_ufs_post_pwr_change,
};
-static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
+static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
.vops = &ufs_hba_exynosauto_vh_ops,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
@@ -1573,7 +1664,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
.opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
};
-static struct exynos_ufs_drv_data exynos_ufs_drvs = {
+static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
@@ -1595,6 +1686,47 @@ static struct exynos_ufs_drv_data exynos_ufs_drvs = {
.post_pwr_change = exynos7_ufs_post_pwr_change,
};
+static struct exynos_ufs_uic_attr fsd_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x2E820183,
+};
+
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
+ .uic_attr = &fsd_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR,
+ .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .pre_link = fsd_ufs_pre_link,
+ .post_link = fsd_ufs_post_link,
+ .pre_pwr_change = fsd_ufs_pre_pwr_change,
+};
+
static const struct of_device_id exynos_ufs_of_match[] = {
{ .compatible = "samsung,exynos7-ufs",
.data = &exynos_ufs_drvs },
@@ -1602,6 +1734,8 @@ static const struct of_device_id exynos_ufs_of_match[] = {
.data = &exynosauto_ufs_drvs },
{ .compatible = "samsung,exynosautov9-ufs-vh",
.data = &exynosauto_ufs_vh_drvs },
+ { .compatible = "tesla,fsd-ufs",
+ .data = &fsd_ufs_drvs },
{},
};
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 0b0a3d530ca6..a4bd6646d7f1 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -22,6 +22,7 @@
#define PA_DBG_RXPHY_CFGUPDT 0x9519
#define PA_DBG_MODE 0x9529
#define PA_DBG_SKIP_RESET_PHY 0x9539
+#define PA_DBG_AUTOMODE_THLD 0x9536
#define PA_DBG_OV_TM 0x9540
#define PA_DBG_SKIP_LINE_RESET 0x9541
#define PA_DBG_LINE_RESET_REQ 0x9543
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index beabc3ccd30b..c958279bdd8f 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -16,6 +16,7 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sched/clock.h>
@@ -30,26 +31,11 @@
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
-#define ufs_mtk_smc(cmd, val, res) \
- arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
- cmd, val, 0, 0, 0, 0, 0, &(res))
-
-#define ufs_mtk_va09_pwr_ctrl(res, on) \
- ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
-
-#define ufs_mtk_crypto_ctrl(res, enable) \
- ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
-
-#define ufs_mtk_ref_clk_notify(on, res) \
- ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
-
-#define ufs_mtk_device_reset_ctrl(high, res) \
- ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
-
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
- { .wmanufacturerid = UFS_VENDOR_MICRON,
+ { .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -82,6 +68,13 @@ static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}
+static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -191,6 +184,14 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
hba->ahit = 0;
}
+
+ /*
+	 * Turn on CLK_CG early to bypass the abnormal ERR_CHK signal
+	 * and prevent a host hang
+ */
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
+ REG_UFS_XOUFS_CTRL);
}
return 0;
@@ -244,8 +245,9 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
if (host->ref_clk_enabled == on)
return 0;
+ ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
+
if (on) {
- ufs_mtk_ref_clk_notify(on, res);
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
@@ -267,7 +269,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+ ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
return -ETIMEDOUT;
@@ -275,8 +277,8 @@ out:
host->ref_clk_enabled = on;
if (on)
ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
- else
- ufs_mtk_ref_clk_notify(on, res);
+
+ ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
return 0;
}
@@ -579,20 +581,38 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+ if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
+ host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
+
	dev_info(hba->dev, "caps: 0x%x\n", host->caps);
}
-static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
+static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- ufs_mtk_boost_crypt(hba, up);
- ufs_mtk_setup_ref_clk(hba, up);
+ if (!host || !host->pm_qos_init)
+ return;
+
+ cpu_latency_qos_update_request(&host->pm_qos_req,
+ boost ? 0 : PM_QOS_DEFAULT_VALUE);
+}
- if (up)
+static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (on) {
phy_power_on(host->mphy);
- else
+ ufs_mtk_setup_ref_clk(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
+ ufs_mtk_boost_pm_qos(hba, on);
+ } else {
+ ufs_mtk_boost_pm_qos(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
+ ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
+ }
}
/**
@@ -637,9 +657,9 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
}
if (clk_pwr_off)
- ufs_mtk_scale_perf(hba, false);
+ ufs_mtk_pwr_ctrl(hba, false);
} else if (on && status == POST_CHANGE) {
- ufs_mtk_scale_perf(hba, true);
+ ufs_mtk_pwr_ctrl(hba, true);
}
return ret;
@@ -675,6 +695,73 @@ static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
return hba->ufs_version;
}
+#define MAX_VCC_NAME 30
+static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct device_node *np = hba->dev->of_node;
+ struct device *dev = hba->dev;
+ char vcc_name[MAX_VCC_NAME];
+ struct arm_smccc_res res;
+ int err, ver;
+
+ if (hba->vreg_info.vcc)
+ return 0;
+
+ if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
+ ufs_mtk_get_vcc_num(res);
+ if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
+ else
+ return -ENODEV;
+ } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
+ ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
+ } else {
+ return 0;
+ }
+
+ err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
+ if (err)
+ return err;
+
+ err = ufshcd_get_vreg(dev, info->vcc);
+ if (err)
+ return err;
+
+ err = regulator_enable(info->vcc->reg);
+ if (!err) {
+ info->vcc->enabled = true;
+ dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
+ }
+
+ return err;
+}
+
+static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct ufs_vreg **vreg_on, **vreg_off;
+
+ if (hba->dev_info.wspecversion >= 0x0300) {
+ vreg_on = &info->vccq;
+ vreg_off = &info->vccq2;
+ } else {
+ vreg_on = &info->vccq2;
+ vreg_off = &info->vccq;
+ }
+
+ if (*vreg_on)
+ (*vreg_on)->always_on = true;
+
+ if (*vreg_off) {
+ regulator_disable((*vreg_off)->reg);
+ devm_kfree(hba->dev, (*vreg_off)->name);
+ devm_kfree(hba->dev, *vreg_off);
+ *vreg_off = NULL;
+ }
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -754,6 +841,26 @@ out:
return err;
}
+static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ if (!ufs_mtk_is_pmc_via_fastauto(hba))
+ return false;
+
+ if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
+ return false;
+
+ if (dev_req_params->pwr_tx != FAST_MODE &&
+ dev_req_params->gear_tx < UFS_HS_G4)
+ return false;
+
+ if (dev_req_params->pwr_rx != FAST_MODE &&
+ dev_req_params->gear_rx < UFS_HS_G4)
+ return false;
+
+ return true;
+}
+
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
@@ -763,8 +870,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
int ret;
ufshcd_init_pwr_dev_param(&host_cap);
- host_cap.hs_rx_gear = UFS_HS_G4;
- host_cap.hs_tx_gear = UFS_HS_G4;
+ host_cap.hs_rx_gear = UFS_HS_G5;
+ host_cap.hs_tx_gear = UFS_HS_G5;
ret = ufshcd_get_pwr_dev_param(&host_cap,
dev_max_params,
@@ -774,6 +881,32 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
__func__);
}
+ if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ dev_req_params->lane_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ dev_req_params->lane_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ dev_req_params->hs_rate);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_NO_ADAPT);
+
+ ret = ufshcd_uic_change_pwr_mode(hba,
+ FASTAUTO_MODE << 4 | FASTAUTO_MODE);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
+ __func__, ret);
+ }
+ }
+
if (host->hw_ver.major >= 3) {
ret = ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
@@ -963,6 +1096,11 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
+ /* Disable reset confirm feature by UniPro */
+ ufshcd_writel(hba,
+ (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
+ REG_UFS_XOUFS_CTRL);
+
err = ufs_mtk_unipro_set_lpm(hba, true);
if (err) {
/* Resume UniPro state for following error recovery */
@@ -973,17 +1111,52 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
return 0;
}
-static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct ufs_vreg *vccqx = NULL;
+
+ if (hba->vreg_info.vccq)
+ vccqx = hba->vreg_info.vccq;
+ else
+ vccqx = hba->vreg_info.vccq2;
+
+ regulator_set_mode(vccqx->reg,
+ lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
+}
+
+static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_device_pwr_ctrl(!lpm,
+ (unsigned long)hba->dev_info.wspecversion,
+ res);
+}
+
+static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
- if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ return;
+
+ /* Skip if VCC is assumed always-on */
+ if (!hba->vreg_info.vcc)
+ return;
+
+ /* Bypass LPM when device is still active */
+ if (lpm && ufshcd_is_ufs_dev_active(hba))
+ return;
+
+ /* Bypass LPM if VCC is enabled */
+ if (lpm && hba->vreg_info.vcc->enabled)
return;
- if (lpm && !hba->vreg_info.vcc->enabled)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_IDLE);
- else if (!lpm)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_NORMAL);
+ if (lpm) {
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ } else {
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ }
}
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
@@ -1026,7 +1199,6 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
* ufshcd_suspend() re-enabling regulators while vreg is still
* in low-power mode.
*/
- ufs_mtk_vreg_set_lpm(hba, true);
err = ufs_mtk_mphy_power_on(hba, false);
if (err)
goto fail;
@@ -1035,6 +1207,8 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (ufshcd_is_link_off(hba))
ufs_mtk_device_reset_ctrl(0, res);
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
+
return 0;
fail:
/*
@@ -1049,13 +1223,17 @@ fail:
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
+ struct arm_smccc_res res;
+
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
err = ufs_mtk_mphy_power_on(hba, true);
if (err)
goto fail;
- ufs_mtk_vreg_set_lpm(hba, false);
-
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
@@ -1087,8 +1265,10 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
- if (mid == UFS_VENDOR_SAMSUNG)
+ if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
+ }
/*
* Decide waiting time before gating reference clock and
@@ -1104,7 +1284,6 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
else
ufs_mtk_setup_ref_clk_wait_us(hba,
REFCLK_DEFAULT_WAIT_US);
-
return 0;
}
@@ -1122,6 +1301,9 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
}
+
+ ufs_mtk_vreg_fix_vcc(hba);
+ ufs_mtk_vreg_fix_vccqx(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -1220,9 +1402,59 @@ static int ufs_mtk_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ufs_mtk_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+static int ufs_mtk_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ufshcd_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_runtime_resume(dev);
+}
+
static const struct dev_pm_ops ufs_mtk_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
+ ufs_mtk_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
+ ufs_mtk_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 414dca86c09f..aa26d415527b 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -7,11 +7,13 @@
#define _UFS_MEDIATEK_H
#include <linux/bitops.h>
+#include <linux/pm_qos.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
* Vendor specific UFSHCI Registers
*/
+#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
@@ -83,6 +85,9 @@ enum {
#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5)
+#define UFS_MTK_SIP_GET_VCC_NUM BIT(6)
+#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7)
/*
* VS_DEBUGCLOCKENABLE
@@ -108,6 +113,7 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
+ UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6,
};
struct ufs_mtk_crypt_cfg {
@@ -126,6 +132,7 @@ struct ufs_mtk_hw_ver {
struct ufs_mtk_host {
struct phy *mphy;
+ struct pm_qos_request pm_qos_req;
struct regulator *reg_va09;
struct reset_control *hci_reset;
struct reset_control *unipro_reset;
@@ -135,6 +142,7 @@ struct ufs_mtk_host {
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
+ bool pm_qos_init;
bool unipro_lpm;
bool ref_clk_enabled;
u16 ref_clk_ungating_wait_us;
@@ -142,4 +150,70 @@ struct ufs_mtk_host {
u32 ip_ver;
};
+/*
+ * Multi-VCC by Numbering
+ */
+enum ufs_mtk_vcc_num {
+ UFS_VCC_NONE = 0,
+ UFS_VCC_1,
+ UFS_VCC_2,
+ UFS_VCC_MAX
+};
+
+/*
+ * Host Power Control options
+ */
+enum {
+ HOST_PWR_HCI = 0,
+ HOST_PWR_MPHY
+};
+
+/*
+ * SMC call wrapper function
+ */
+struct ufs_mtk_smc_arg {
+ unsigned long cmd;
+ struct arm_smccc_res *res;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+ unsigned long v4;
+ unsigned long v5;
+ unsigned long v6;
+ unsigned long v7;
+};
+
+static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s)
+{
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL,
+ s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res);
+}
+
+#define ufs_mtk_smc(...) \
+ _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__})
+
+/*
+ * SMC call interface
+ */
+#define ufs_mtk_va09_pwr_ctrl(res, on) \
+ ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on)
+
+#define ufs_mtk_crypto_ctrl(res, enable) \
+ ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable)
+
+#define ufs_mtk_ref_clk_notify(on, stage, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high)
+
+#define ufs_mtk_host_pwr_ctrl(opt, on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on)
+
+#define ufs_mtk_get_vcc_num(res) \
+ ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res))
+
+#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver)
+
#endif /* !_UFS_MEDIATEK_H */
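The rewritten ufs_mtk_smc() above packs __VA_ARGS__ into a C99 compound literal, so each wrapper passes only the arguments it needs and every trailing ufs_mtk_smc_arg member it omits is zero-initialized; note how all the wrappers pass &(res) second, matching the struct's second member. A standalone illustration of the idiom with a toy struct:

#include <stdio.h>

struct args {
	unsigned long cmd;
	unsigned long v1;
	unsigned long v2;
};

static void do_call(struct args a)
{
	printf("cmd=%lu v1=%lu v2=%lu\n", a.cmd, a.v1, a.v2);
}

/* Same trick as ufs_mtk_smc(): wrap __VA_ARGS__ in a compound literal */
#define call(...) do_call((struct args){ __VA_ARGS__ })

int main(void)
{
	call(1);	/* v1 and v2 default to 0 */
	call(2, 10);	/* v2 defaults to 0 */
	call(3, 10, 20);
	return 0;
}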
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index f10d4668814c..473fad83701e 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -58,19 +58,6 @@ static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
-static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
-{
- int err = 0;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
- __func__, err);
-
- return err;
-}
-
static int ufs_qcom_host_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool optional)
{
@@ -194,13 +181,6 @@ out:
return err;
}
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
- u32 tx_lanes;
-
- return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-}
-
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
int err;
@@ -570,9 +550,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
err = ufshcd_disable_host_tx_lcc(hba);
break;
- case POST_CHANGE:
- ufs_qcom_link_startup_post_change(hba);
- break;
default:
break;
}
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
new file mode 100644
index 000000000000..f8a5e79ed3b4
--- /dev/null
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Renesas UFS host controller driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <ufs/ufshcd.h>
+
+#include "ufshcd-pltfrm.h"
+
+struct ufs_renesas_priv {
+ bool initialized; /* The hardware needs initialization once */
+};
+
+enum {
+ SET_PHY_INDEX_LO = 0,
+ SET_PHY_INDEX_HI,
+ TIMER_INDEX,
+ MAX_INDEX
+};
+
+enum ufs_renesas_init_param_mode {
+ MODE_RESTORE,
+ MODE_SET,
+ MODE_SAVE,
+ MODE_POLL,
+ MODE_WAIT,
+ MODE_WRITE,
+};
+
+#define PARAM_RESTORE(_reg, _index) \
+ { .mode = MODE_RESTORE, .reg = _reg, .index = _index }
+#define PARAM_SET(_index, _set) \
+ { .mode = MODE_SET, .index = _index, .u.set = _set }
+#define PARAM_SAVE(_reg, _mask, _index) \
+ { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
+ .index = _index }
+#define PARAM_POLL(_reg, _expected, _mask) \
+ { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
+ .mask = (u32)(_mask) }
+#define PARAM_WAIT(_delay_us) \
+ { .mode = MODE_WAIT, .u.delay_us = _delay_us }
+
+#define PARAM_WRITE(_reg, _val) \
+ { .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
+
+#define PARAM_WRITE_D0_D4(_d0, _d4) \
+ PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
+
+#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_RESTORE_800_80C_POLL(_index) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE(0xd0, 0x00000800), \
+ PARAM_RESTORE(0xd4, _index), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_828_82C_POLL(_data_828) \
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
+ PARAM_WRITE_D0_D4(0x00000828, _data_828), \
+ PARAM_WRITE(0xd0, 0x0000082c), \
+ PARAM_POLL(0xd4, _data_828, _data_828)
+
+#define PARAM_WRITE_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_SET_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE_804_80C_POLL(0x1a, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
+ PARAM_WRITE_804_80C_POLL(0x1b, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0), \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
+ PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_POLL(0xd4, _expected, _mask), \
+ PARAM_WRITE(0xf0, 0)
+
+struct ufs_renesas_init_param {
+ enum ufs_renesas_init_param_mode mode;
+ u32 reg;
+ union {
+ u32 expected;
+ u32 delay_us;
+ u32 set;
+ u32 val;
+ } u;
+ u32 mask;
+ u32 index;
+};
+
+/* This setting is for SERIES B */
+static const struct ufs_renesas_init_param ufs_param[] = {
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
+
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE(0xc0, 0x41584901),
+
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
+ PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
+ PARAM_WRITE(0xd0, 0x0000080c),
+ PARAM_POLL(0xd4, BIT(8), BIT(8)),
+
+ PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
+
+ PARAM_WRITE(0xd0, 0x00000804),
+ PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
+
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
+ PARAM_WRITE(0xd4, 0x00000000),
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
+ PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
+ PARAM_WRITE(0xd0, 0x0000082c),
+ PARAM_POLL(0xd4, BIT(27), BIT(27)),
+ PARAM_WRITE(0xd0, 0x00000d2c),
+ PARAM_POLL(0xd4, BIT(0), BIT(0)),
+
+ /* phy setup */
+ PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+ PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
+ PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
+
+ PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
+ PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
+
+ PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
+ PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
+
+ PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
+ PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
+ PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
+ PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
+ PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
+ PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
+
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+
+ PARAM_WRITE_PHY(0x0028, 0x0061),
+ PARAM_WRITE_PHY(0x4014, 0x0061),
+ PARAM_SET_PHY(0x401c, BIT(2)),
+ PARAM_WRITE_PHY(0x4000, 0x0000),
+ PARAM_WRITE_PHY(0x4001, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0001),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0002),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x0000),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x001a),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
+ PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
+ PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
+ /* end of phy setup */
+
+ PARAM_WRITE(0xf0, 0),
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_RESTORE(0xd4, TIMER_INDEX),
+};
+
+static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+}
+
+static void ufs_renesas_reg_control(struct ufs_hba *hba,
+ const struct ufs_renesas_init_param *p)
+{
+ static u32 save[MAX_INDEX];
+ int ret;
+ u32 val;
+
+ WARN_ON(p->index >= MAX_INDEX);
+
+ switch (p->mode) {
+ case MODE_RESTORE:
+ ufshcd_writel(hba, save[p->index], p->reg);
+ break;
+ case MODE_SET:
+ save[p->index] |= p->u.set;
+ break;
+ case MODE_SAVE:
+ save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
+ break;
+ case MODE_POLL:
+ ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
+ val,
+ (val & p->mask) == p->u.expected,
+ 10, 1000);
+ if (ret)
+ dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
+ __func__, ret, val, p->mask, p->u.expected);
+ break;
+ case MODE_WAIT:
+ if (p->u.delay_us > 1000)
+ mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
+ else
+ udelay(p->u.delay_us);
+ break;
+ case MODE_WRITE:
+ ufshcd_writel(hba, p->u.val, p->reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ufs_renesas_pre_init(struct ufs_hba *hba)
+{
+ const struct ufs_renesas_init_param *p = ufs_param;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
+ ufs_renesas_reg_control(hba, &p[i]);
+}
+
+static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+
+ if (priv->initialized)
+ return 0;
+
+ if (status == PRE_CHANGE)
+ ufs_renesas_pre_init(hba);
+
+ priv->initialized = true;
+
+ return 0;
+}
+
+static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ if (on && status == PRE_CHANGE)
+ pm_runtime_get_sync(hba->dev);
+ else if (!on && status == POST_CHANGE)
+ pm_runtime_put(hba->dev);
+
+ return 0;
+}
+
+static int ufs_renesas_init(struct ufs_hba *hba)
+{
+ struct ufs_renesas_priv *priv;
+
+ priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, priv);
+
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_renesas_vops = {
+ .name = "renesas",
+ .init = ufs_renesas_init,
+ .setup_clocks = ufs_renesas_setup_clocks,
+ .hce_enable_notify = ufs_renesas_hce_enable_notify,
+ .dbg_register_dump = ufs_renesas_dbg_register_dump,
+};
+
+static const struct of_device_id __maybe_unused ufs_renesas_of_match[] = {
+ { .compatible = "renesas,r8a779f0-ufs" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ufs_renesas_of_match);
+
+static int ufs_renesas_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
+}
+
+static int ufs_renesas_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+
+ return 0;
+}
+
+static struct platform_driver ufs_renesas_platform = {
+ .probe = ufs_renesas_probe,
+ .remove = ufs_renesas_remove,
+ .driver = {
+ .name = "ufshcd-renesas",
+ .of_match_table = of_match_ptr(ufs_renesas_of_match),
+ },
+};
+module_platform_driver(ufs_renesas_platform);
+
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_DESCRIPTION("Renesas UFS host controller driver");
+MODULE_LICENSE("Dual MIT/GPL");
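The new driver encodes its entire bring-up, including the long PHY sequence, as a const table of tagged ufs_renesas_init_param entries interpreted by ufs_renesas_reg_control(), instead of hundreds of open-coded register accesses. A reduced sketch of the same table-driven dispatch, with stand-in types and a fake register file rather than the driver's MMIO helpers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum mode { MODE_WRITE, MODE_POLL };

struct init_param {
	enum mode mode;
	uint32_t reg;
	uint32_t val;	/* value to write, or expected poll value */
	uint32_t mask;	/* poll mask */
};

static uint32_t regs[0x100];	/* fake register file */

static void reg_control(const struct init_param *p)
{
	switch (p->mode) {
	case MODE_WRITE:
		regs[p->reg] = p->val;
		break;
	case MODE_POLL:
		/* the real code polls with a timeout; this cannot hang
		 * here because the write above already set the bit
		 */
		while ((regs[p->reg] & p->mask) != p->val)
			;
		break;
	}
}

static const struct init_param seq[] = {
	{ MODE_WRITE, 0x10, 0x1, 0 },
	{ MODE_POLL,  0x10, 0x1, 0x1 },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		reg_control(&seq[i]);
	puts("sequence done");
	return 0;
}

The payoff is that adding or reordering an init step is a one-line table change, and the interpreter is the only code that touches the hardware.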
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 04166bda41da..1c91f43e15c8 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -24,7 +24,7 @@ struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
-enum {
+enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
@@ -42,6 +42,15 @@ static const guid_t intel_dsm_guid =
GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
+static bool __intel_dsm_supported(struct intel_host *host,
+ enum intel_ufs_dsm_func_id fn)
+{
+ return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
+}
+
+#define INTEL_DSM_SUPPORTED(host, name) \
+ __intel_dsm_supported(host, INTEL_DSM_##name)
+
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
@@ -71,7 +80,7 @@ out:
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
- if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ if (!__intel_dsm_supported(intel_host, fn))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
@@ -300,7 +309,7 @@ static int ufs_intel_device_reset(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
u32 result = 0;
int err;
@@ -342,7 +351,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
return -ENOMEM;
ufshcd_set_variant(hba, host);
intel_dsm_init(host, hba->dev);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
if (hba->vops->device_reset)
hba->caps |= UFSHCD_CAP_DEEPSLEEP;
} else {
@@ -426,6 +435,7 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
{
hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
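The __intel_dsm_supported() helper above also fixes a latent bug: the old tests of the form host->dsm_fns & INTEL_DSM_RESET used the DSM function number (1) as if it were a bit mask, so they actually probed bit 0 (INTEL_DSM_FNS). Centralizing the 1u << fn shift makes the intent explicit. A tiny illustration with made-up values:

#include <stdbool.h>
#include <stdio.h>

enum dsm_func_id { DSM_FNS = 0, DSM_RESET = 1 };

static bool dsm_supported(unsigned int fns, enum dsm_func_id fn)
{
	return fn < 32 && (fns & (1u << fn));
}

int main(void)
{
	unsigned int fns = 1u << DSM_RESET;	/* only RESET advertised */

	printf("buggy:   %d\n", !!(fns & DSM_RESET));		/* tests bit 0: 0 */
	printf("correct: %d\n", dsm_supported(fns, DSM_RESET));	/* 1 */
	return 0;
}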
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 173aea8e9997..5739ff007828 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -26,7 +26,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
int i;
struct device *dev = hba->dev;
struct device_node *np = dev->of_node;
- char *name;
+ const char *name;
u32 *clkfreq = NULL;
struct ufs_clk_info *clki;
int len = 0;
@@ -79,8 +79,8 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
}
for (i = 0; i < sz; i += 2) {
- ret = of_property_read_string_index(np,
- "clock-names", i/2, (const char **)&name);
+ ret = of_property_read_string_index(np, "clock-names", i/2,
+ &name);
if (ret)
goto out;
@@ -120,8 +120,8 @@ static bool phandle_exists(const struct device_node *np,
}
#define MAX_PROP_SIZE 32
-static int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg)
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg)
{
char prop_name[MAX_PROP_SIZE];
struct ufs_vreg *vreg = NULL;
@@ -156,6 +156,7 @@ out:
*out_vreg = vreg;
return 0;
}
+EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
/**
* ufshcd_parse_regulator_info - get regulator info from device tree
@@ -219,8 +220,8 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
*
* Returns 0 on success, non-zero value on failure
*/
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
- struct ufs_pa_layer_attr *dev_max,
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
+ const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr)
{
int min_pltfrm_gear;
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index 43c2e412bd99..2e4ba2bfbcad 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -25,12 +25,14 @@ struct ufs_dev_params {
u32 desired_working_mode;
};
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
- struct ufs_pa_layer_attr *dev_max,
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param,
+ const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr);
void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg);
#endif /* UFSHCD_PLTFRM_H_ */
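Exporting ufshcd_populate_vreg() lets vendor glue drivers parse an optional "<name>-supply" regulator description from the device tree without duplicating the core's parsing. A hedged usage sketch in a hypothetical vendor init hook (the function and supply name are illustrative):

#include <linux/device.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"

static int example_vendor_init(struct ufs_hba *hba)
{
	struct ufs_vreg *vreg;
	int err;

	err = ufshcd_populate_vreg(hba->dev, "vcc", &vreg);
	if (err)
		return err;

	/* vreg stays NULL when the DT property is absent */
	if (vreg)
		dev_info(hba->dev, "vcc regulator described in DT\n");

	return 0;
}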
diff --git a/drivers/usb/chipidea/trace.h b/drivers/usb/chipidea/trace.h
index 1601fd86c4c1..ca0e65b48f0a 100644
--- a/drivers/usb/chipidea/trace.h
+++ b/drivers/usb/chipidea/trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(ci_log,
TP_ARGS(ci, vaf),
TP_STRUCT__entry(
__string(name, dev_name(ci->dev))
- __dynamic_array(char, msg, CHIPIDEA_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(ci->dev));
- vsnprintf(__get_str(msg), CHIPIDEA_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
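This and the identical conversions below (xhci, mtu3, musb) replace a fixed-size scratch buffer plus vsnprintf() with the tracing core's __vstring()/__assign_vstr() helpers, which size the ring-buffer record to the formatted length instead of always reserving the full *_MSG_MAX bytes. The resulting pattern, shown once as a hypothetical trace header:

TRACE_EVENT(example_log,
	TP_PROTO(struct device *dev, struct va_format *vaf),
	TP_ARGS(dev, vaf),
	TP_STRUCT__entry(
		__string(name, dev_name(dev))
		/* sized at record time from fmt/va, not a fixed max */
		__vstring(msg, vaf->fmt, vaf->va)
	),
	TP_fast_assign(
		__assign_str(name, dev_name(dev));
		__assign_vstr(msg, vaf->fmt, vaf->va);
	),
	TP_printk("%s: %s", __get_str(name), __get_str(msg))
);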
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a6a87c5d1b05..94b305bbd621 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1251,7 +1251,8 @@ void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
/*
- * Some usb host controllers can only perform dma using a small SRAM area.
+ * Some usb host controllers can only perform dma using a small SRAM area,
+ * or have restrictions on addressable DRAM.
* The usb core itself is however optimized for host controllers that can dma
* using regular system memory - like pci devices doing bus mastering.
*
@@ -3127,8 +3128,18 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
if (IS_ERR(hcd->localmem_pool))
return PTR_ERR(hcd->localmem_pool);
- local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
- size, MEMREMAP_WC);
+ /*
+ * If a physical SRAM address was passed, map it; otherwise
+ * allocate system memory as a buffer.
+ */
+ if (phys_addr)
+ local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
+ size, MEMREMAP_WC);
+ else
+ local_mem = dmam_alloc_attrs(hcd->self.sysdev, size, &dma,
+ GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
if (IS_ERR(local_mem))
return PTR_ERR(local_mem);
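With this change a zero phys_addr asks the core to back the HCD's localmem pool with DMA-coherent system memory instead of mapping device SRAM, which is exactly what the SA1111 glue below relies on. A hedged sketch of the two call patterns (example_setup_pool is hypothetical):

#include <linux/usb/hcd.h>
#include <linux/sizes.h>

static int example_setup_pool(struct usb_hcd *hcd, phys_addr_t sram_phys,
			      dma_addr_t sram_dma, size_t sram_size)
{
	if (sram_phys)
		/* map dedicated on-chip SRAM as the localmem pool */
		return usb_hcd_setup_local_mem(hcd, sram_phys, sram_dma,
					       sram_size);

	/* phys_addr == 0: back the pool with dmam_alloc_attrs() memory,
	 * useful when the controller can reach only part of DRAM
	 */
	return usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);
}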
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index feca826d3f6a..75c2b28b3379 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -203,6 +203,31 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
goto err1;
}
+ /*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller. For
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+ * User's Guide" mentions that jumpers R51 and R52 control the
+ * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+ * SDRAM bank 1 on Neponset). The default configuration selects
+ * Assabet, so any address in bank 1 is necessarily invalid.
+ *
+ * As a workaround, use a bounce buffer in addressable memory
+ * as local_mem, relying on ZONE_DMA to provide an area that
+ * fits within the above constraints.
+ *
+ * SZ_64K is an estimate of the size this might need.
+ */
+ ret = usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);
+ if (ret)
+ goto err1;
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&dev->dev, "request_mem_region failed\n");
ret = -EBUSY;
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index a5da02077297..61e93a3540a7 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -28,9 +28,9 @@
DECLARE_EVENT_CLASS(xhci_log_msg,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
- TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+ TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
TP_fast_assign(
- vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index dfa0d5ce6012..fcb95fb639e0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -248,7 +248,7 @@ sisusbcon_init(struct vc_data *c, int init)
*/
kref_get(&sisusb->kref);
- if (!*c->vc_uni_pagedir_loc)
+ if (!*c->uni_pagedict_loc)
con_set_default_unimap(c);
mutex_unlock(&sisusb->lock);
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
index a09deae1146f..03d2a9bac27e 100644
--- a/drivers/usb/mtu3/mtu3_trace.h
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -18,18 +18,16 @@
#include "mtu3.h"
-#define MTU3_MSG_MAX 256
-
TRACE_EVENT(mtu3_log,
TP_PROTO(struct device *dev, struct va_format *vaf),
TP_ARGS(dev, vaf),
TP_STRUCT__entry(
__string(name, dev_name(dev))
- __dynamic_array(char, msg, MTU3_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
- vsnprintf(__get_str(msg), MTU3_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index ec28b5716796..f246b14394c4 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(musb_log,
TP_ARGS(musb, vaf),
TP_STRUCT__entry(
__string(name, dev_name(musb->controller))
- __dynamic_array(char, msg, MUSB_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(musb->controller));
- vsnprintf(__get_str(msg), MUSB_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 48c4dadb0c7c..75a703b803a2 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -29,7 +29,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
vp_iowrite16(vector, &cfg->msix_config);
return vp_ioread16(&cfg->msix_config);
@@ -128,6 +127,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cap_addr(hw, &cap);
+ hw->cap_dev_config_size = le32_to_cpu(cap.length);
IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -233,15 +233,23 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
struct ifcvf_adapter *adapter;
+ u32 net_config_size = sizeof(struct virtio_net_config);
+ u32 blk_config_size = sizeof(struct virtio_blk_config);
+ u32 cap_size = hw->cap_dev_config_size;
u32 config_size;
adapter = vf_to_adapter(hw);
+ /* If the on-board device config space is larger than
+ * struct virtio_net/blk_config, return only the size of the
+ * contents the spec defines. This is very unlikely in practice;
+ * the clamp is purely defensive programming.
+ */
switch (hw->dev_type) {
case VIRTIO_ID_NET:
- config_size = sizeof(struct virtio_net_config);
+ config_size = min(cap_size, net_config_size);
break;
case VIRTIO_ID_BLOCK:
- config_size = sizeof(struct virtio_blk_config);
+ config_size = min(cap_size, blk_config_size);
break;
default:
config_size = 0;
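The min() above simply clamps what the driver exposes to the smaller of the capability-advertised size and the spec-defined structure. In miniature, with illustrative values:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int cap_size = 512;	/* from VIRTIO_PCI_CAP_DEVICE_CFG */
	unsigned int spec_size = 24;	/* stand-in for sizeof(config struct) */

	/* never expose more than the spec-defined layout */
	printf("config size = %u\n", min_u32(cap_size, spec_size));
	return 0;
}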
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 115b61f4924b..f5563f665cc6 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -87,6 +87,8 @@ struct ifcvf_hw {
int config_irq;
int vqs_reused_irq;
u16 nr_vring;
+ /* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ u32 cap_dev_config_size;
};
struct ifcvf_adapter {
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 0a5670729412..f9c0044c6442 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -685,7 +685,7 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
}
/*
- * IFCVF currently does't have on-chip IOMMU, so not
+ * IFCVF currently doesn't have on-chip IOMMU, so not
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
@@ -752,59 +752,36 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
+ struct vdpa_device *vdpa_dev;
struct pci_dev *pdev;
struct ifcvf_hw *vf;
- struct device *dev;
- int ret, i;
+ int ret;
ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
- if (ifcvf_mgmt_dev->adapter)
+ if (!ifcvf_mgmt_dev->adapter)
return -EOPNOTSUPP;
- pdev = ifcvf_mgmt_dev->pdev;
- dev = &pdev->dev;
- adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, 1, 1, name, false);
- if (IS_ERR(adapter)) {
- IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
- return PTR_ERR(adapter);
- }
-
- ifcvf_mgmt_dev->adapter = adapter;
-
+ adapter = ifcvf_mgmt_dev->adapter;
vf = &adapter->vf;
- vf->dev_type = get_dev_type(pdev);
- vf->base = pcim_iomap_table(pdev);
+ pdev = adapter->pdev;
+ vdpa_dev = &adapter->vdpa;
- adapter->pdev = pdev;
- adapter->vdpa.dma_dev = &pdev->dev;
-
- ret = ifcvf_init_hw(vf, pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- goto err;
- }
-
- for (i = 0; i < vf->nr_vring; i++)
- vf->vring[i].irq = -EINVAL;
-
- vf->hw_features = ifcvf_get_hw_features(vf);
- vf->config_size = ifcvf_get_config_size(vf);
+ if (name)
+ ret = dev_set_name(&vdpa_dev->dev, "%s", name);
+ else
+ ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);
- adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
+ put_device(&adapter->vdpa.dev);
IFCVF_ERR(pdev, "Failed to register to vDPA bus");
- goto err;
+ return ret;
}
return 0;
-
-err:
- put_device(&adapter->vdpa.dev);
- return ret;
}
+
static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
@@ -823,61 +800,94 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct device *dev = &pdev->dev;
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
u32 dev_type;
- int ret;
-
- ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
- if (!ifcvf_mgmt_dev) {
- IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
- return -ENOMEM;
- }
-
- dev_type = get_dev_type(pdev);
- switch (dev_type) {
- case VIRTIO_ID_NET:
- ifcvf_mgmt_dev->mdev.id_table = id_table_net;
- break;
- case VIRTIO_ID_BLOCK:
- ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
- break;
- default:
- IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
- ret = -EOPNOTSUPP;
- goto err;
- }
-
- ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
- ifcvf_mgmt_dev->mdev.device = dev;
- ifcvf_mgmt_dev->pdev = pdev;
+ int ret, i;
ret = pcim_enable_device(pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to enable device\n");
- goto err;
+ return ret;
}
-
ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
IFCVF_DRIVER_NAME);
if (ret) {
IFCVF_ERR(pdev, "Failed to request MMIO region\n");
- goto err;
+ return ret;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
IFCVF_ERR(pdev, "No usable DMA configuration\n");
- goto err;
+ return ret;
}
ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
if (ret) {
IFCVF_ERR(pdev,
"Failed for adding devres for freeing irq vectors\n");
- goto err;
+ return ret;
}
pci_set_master(pdev);
+ adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+ dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+ if (IS_ERR(adapter)) {
+ IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+ return PTR_ERR(adapter);
+ }
+
+ vf = &adapter->vf;
+ vf->dev_type = get_dev_type(pdev);
+ vf->base = pcim_iomap_table(pdev);
+
+ adapter->pdev = pdev;
+ adapter->vdpa.dma_dev = &pdev->dev;
+
+ ret = ifcvf_init_hw(vf, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+ return ret;
+ }
+
+ for (i = 0; i < vf->nr_vring; i++)
+ vf->vring[i].irq = -EINVAL;
+
+ vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
+
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->adapter = adapter;
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
+ ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
+
+ adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+
ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
if (ret) {
IFCVF_ERR(pdev,
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 44104093163b..6af9fdbb86b7 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -70,6 +70,16 @@ struct mlx5_vdpa_wq_ent {
struct mlx5_vdpa_dev *mvdev;
};
+enum {
+ MLX5_VDPA_DATAVQ_GROUP,
+ MLX5_VDPA_CVQ_GROUP,
+ MLX5_VDPA_NUMVQ_GROUPS
+};
+
+enum {
+ MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -85,6 +95,7 @@ struct mlx5_vdpa_dev {
struct mlx5_vdpa_mr mr;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e85c1d71f4ed..ed100a35e596 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -164,6 +164,7 @@ struct mlx5_vdpa_net {
bool setup;
u32 cur_num_vqs;
u32 rqt_size;
+ bool nb_registered;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
@@ -895,6 +896,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
if (err)
goto err_cmd;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT;
kfree(in);
mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -922,6 +924,7 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
return;
}
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
umems_destroy(ndev, mvq);
}
@@ -1121,6 +1124,20 @@ err_cmd:
return err;
}
+static bool is_valid_state_change(int oldstate, int newstate)
+{
+ switch (oldstate) {
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
+ default:
+ return false;
+ }
+}
+
static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
{
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
@@ -1130,6 +1147,12 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
void *in;
int err;
+ if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
+ return 0;
+
+ if (!is_valid_state_change(mvq->fw_state, state))
+ return -EINVAL;
+
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1440,7 +1463,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
- memset(dmac_c, 0xff, ETH_ALEN);
+ eth_broadcast_addr(dmac_c);
ether_addr_copy(dmac_v, mac);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
if (tagged) {
@@ -1992,6 +2015,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ int err;
if (!mvdev->actual_features)
return;
@@ -2005,8 +2029,16 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
}
mvq = &ndev->vqs[idx];
- if (!ready)
+ if (!ready) {
suspend_vq(ndev, mvq);
+ } else {
+ err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
+ ready = false;
+ }
+ }
+
mvq->ready = ready;
}
@@ -2095,9 +2127,14 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
return PAGE_SIZE;
}
-static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
{
- return 0;
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return MLX5_VDPA_CVQ_GROUP;
+
+ return MLX5_VDPA_DATAVQ_GROUP;
}
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
@@ -2511,6 +2548,15 @@ err_clear:
up_write(&ndev->reslock);
}
+static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
+{
+ int i;
+
+ /* By default, all groups are mapped to ASID 0 */
+ for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
+ mvdev->group2asid[i] = 0;
+}
+
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2529,7 +2575,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
+ init_group_to_asid_map(mvdev);
++mvdev->generation;
+
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
@@ -2567,26 +2615,63 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
- struct vhost_iotlb *iotlb)
+static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ u64 start = 0ULL, last = 0ULL - 1;
+ struct vhost_iotlb_map *map;
+ int err = 0;
+
+ spin_lock(&mvdev->cvq.iommu_lock);
+ vhost_iotlb_reset(mvdev->cvq.iotlb);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
+ map->last, map->addr, map->perm);
+ if (err)
+ goto out;
+ }
+
+out:
+ spin_unlock(&mvdev->cvq.iommu_lock);
+ return err;
+}
+
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
- down_write(&ndev->reslock);
-
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
- goto err;
+ return err;
}
if (change_map)
err = mlx5_vdpa_change_map(mvdev, iotlb);
-err:
+ return err;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err = -EINVAL;
+
+ down_write(&ndev->reslock);
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+ err = set_map_data(mvdev, iotlb);
+ if (err)
+ goto out;
+ }
+
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
+ err = set_map_control(mvdev, iotlb);
+
+out:
up_write(&ndev->reslock);
return err;
}
@@ -2733,6 +2818,49 @@ out_err:
return err;
}
+static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_control_vq *cvq;
+
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return;
+
+ cvq = &mvdev->cvq;
+ cvq->ready = false;
+}
+
+static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ down_write(&ndev->reslock);
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ flush_workqueue(ndev->mvdev.wq);
+ for (i = 0; i < ndev->cur_num_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ suspend_vq(ndev, mvq);
+ }
+ mlx5_vdpa_cvq_suspend(mvdev);
+ up_write(&ndev->reslock);
+ return 0;
+}
+
+static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
+ unsigned int asid)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (group >= MLX5_VDPA_NUMVQ_GROUPS)
+ return -EINVAL;
+
+ mvdev->group2asid[group] = asid;
+ return 0;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2762,7 +2890,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
.set_map = mlx5_vdpa_set_map,
+ .set_group_asid = mlx5_set_group_asid,
.free = mlx5_vdpa_free,
+ .suspend = mlx5_vdpa_suspend,
};
static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -2828,6 +2958,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
mvq->index = i;
mvq->ndev = ndev;
mvq->fwqp.fw = true;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
}
for (; i < ndev->mvdev.max_vqs; i++) {
mvq = &ndev->vqs[i];
@@ -2902,13 +3033,21 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ down_read(&ndev->reslock);
+ if (!ndev->nb_registered) {
+ up_read(&ndev->reslock);
+ return NOTIFY_DONE;
+ }
wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
- if (!wqent)
+ if (!wqent) {
+ up_read(&ndev->reslock);
return NOTIFY_DONE;
+ }
wqent->mvdev = &ndev->mvdev;
INIT_WORK(&wqent->work, update_carrier);
queue_work(ndev->mvdev.wq, &wqent->work);
+ up_read(&ndev->reslock);
ret = NOTIFY_OK;
break;
default:
@@ -2982,7 +3121,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- 1, 1, name, false);
+ MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -3062,6 +3201,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
+ ndev->nb_registered = true;
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
@@ -3093,7 +3233,10 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
- mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ if (ndev->nb_registered) {
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ }
wq = mvdev->wq;
mvdev->wq = NULL;
destroy_workqueue(wq);
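The reworked set_map above implements the multi-address-space contract: each virtqueue group is bound to an ASID via mlx5_set_group_asid(), and an incoming IOTLB update is applied to every group currently bound to that ASID, failing with -EINVAL when none is. A condensed sketch of that dispatch, with stand-in mapping routines:

#include <errno.h>

enum { DATAVQ_GROUP, CVQ_GROUP, NUM_GROUPS };

struct dev_state {
	unsigned int group2asid[NUM_GROUPS];
};

/* stand-ins for the real data-path and control-VQ mapping code */
static int apply_data_map(void *iotlb) { (void)iotlb; return 0; }
static int apply_cvq_map(void *iotlb)  { (void)iotlb; return 0; }

static int set_map(struct dev_state *s, unsigned int asid, void *iotlb)
{
	int err = -EINVAL;	/* fails if no group uses this asid */

	if (s->group2asid[DATAVQ_GROUP] == asid) {
		err = apply_data_map(iotlb);
		if (err)
			return err;
	}
	if (s->group2asid[CVQ_GROUP] == asid)
		err = apply_cvq_map(iotlb);

	return err;
}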
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index ebf2f363fbe7..c06c02704461 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -824,11 +824,11 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
config.mac))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.status);
+ val_u16 = __virtio16_to_cpu(true, config.status);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.mtu);
+ val_u16 = __virtio16_to_cpu(true, config.mtu);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
return -EMSGSIZE;
@@ -846,17 +846,9 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
{
u32 device_id;
void *hdr;
- u8 status;
int err;
down_read(&vdev->cf_lock);
- status = vdev->config->get_status(vdev);
- if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
- NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
- err = -EAGAIN;
- goto out;
- }
-
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
if (!hdr) {
@@ -913,7 +905,7 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
}
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
- max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+ max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
return -EMSGSIZE;
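These conversions are about types rather than behavior: the fields of struct virtio_net_config are __virtio16, not __le16, and the vdpa core reads them with VIRTIO_F_VERSION_1 semantics, where virtio-native means little-endian. Passing true for little_endian keeps the result identical while satisfying sparse. A hedged one-liner showing the idiom (the helper name is illustrative):

#include <linux/virtio_byteorder.h>

/* read a __virtio16 config field on a VERSION_1 (little-endian) device */
static u16 example_read_virtio16(__virtio16 raw)
{
	return __virtio16_to_cpu(true, raw);
}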
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 0f2865899647..225b7f5d8be3 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
- "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");
+ "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
@@ -107,6 +107,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
for (i = 0; i < vdpasim->dev_attr.nas; i++)
vhost_iotlb_reset(&vdpasim->iommu[i]);
+ vdpasim->running = true;
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -291,7 +292,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
goto err_iommu;
for (i = 0; i < vdpasim->dev_attr.nas; i++)
- vhost_iotlb_init(&vdpasim->iommu[i], 0, 0);
+ vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
@@ -505,6 +506,17 @@ static int vdpasim_reset(struct vdpa_device *vdpa)
return 0;
}
+static int vdpasim_suspend(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->running = false;
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -694,6 +706,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
@@ -726,6 +739,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
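The new suspend op is deliberately lightweight: it only clears a running flag under the simulator lock, and each worker (net and blk) re-checks that flag before touching a virtqueue, so a suspended device parks until vdpasim_do_reset() sets running again. Schematically, in user-space terms with pthreads standing in for the kernel spinlock:

#include <stdbool.h>
#include <pthread.h>

struct sim {
	pthread_mutex_t lock;
	bool running;
};

static int sim_suspend(struct sim *s)
{
	pthread_mutex_lock(&s->lock);
	s->running = false;	/* worker parks on its next pass */
	pthread_mutex_unlock(&s->lock);
	return 0;
}

static void sim_work(struct sim *s)
{
	pthread_mutex_lock(&s->lock);
	if (!s->running)
		goto out;	/* suspended: process nothing */
	/* ... process virtqueues ... */
out:
	pthread_mutex_unlock(&s->lock);
}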
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 622782e92239..061986f30911 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -66,6 +66,7 @@ struct vdpasim {
u32 generation;
u64 features;
u32 groups;
+ bool running;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 42d401d43911..c8bfea3b7db2 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -25,31 +25,49 @@
#define DRV_LICENSE "GPL v2"
#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_BLK_F_FLUSH) | \
(1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
(1ULL << VIRTIO_BLK_F_SEG_MAX) | \
(1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
(1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
- (1ULL << VIRTIO_BLK_F_MQ))
+ (1ULL << VIRTIO_BLK_F_MQ) | \
+ (1ULL << VIRTIO_BLK_F_DISCARD) | \
+ (1ULL << VIRTIO_BLK_F_WRITE_ZEROES))
#define VDPASIM_BLK_CAPACITY 0x40000
#define VDPASIM_BLK_SIZE_MAX 0x1000
#define VDPASIM_BLK_SEG_MAX 32
+#define VDPASIM_BLK_DWZ_MAX_SECTORS UINT_MAX
+
+/* 1 virtqueue, 1 address space, 1 virtqueue group */
#define VDPASIM_BLK_VQ_NUM 1
+#define VDPASIM_BLK_AS_NUM 1
+#define VDPASIM_BLK_GROUP_NUM 1
static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
-static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
+static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector,
+ u64 num_sectors, u64 max_sectors)
{
- u64 range_sectors = range_size >> SECTOR_SHIFT;
-
- if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
- return false;
+ if (start_sector > VDPASIM_BLK_CAPACITY) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "starting sector exceeds the capacity - start: 0x%llx capacity: 0x%x\n",
+ start_sector, VDPASIM_BLK_CAPACITY);
+ }
- if (start_sector > VDPASIM_BLK_CAPACITY)
+ if (num_sectors > max_sectors) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "number of sectors exceeds the max allowed in a request - num: 0x%llx max: 0x%llx\n",
+ num_sectors, max_sectors);
return false;
+ }
- if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
+ if (num_sectors > VDPASIM_BLK_CAPACITY - start_sector) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "request exceeds the capacity - start: 0x%llx num: 0x%llx capacity: 0x%x\n",
+ start_sector, num_sectors, VDPASIM_BLK_CAPACITY);
return false;
+ }
return true;
}
@@ -63,6 +81,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
{
size_t pushed = 0, to_pull, to_push;
struct virtio_blk_outhdr hdr;
+ bool handled = false;
ssize_t bytes;
loff_t offset;
u64 sector;
@@ -76,14 +95,14 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
return false;
if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
- dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
+ dev_dbg(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
vq->out_iov.used, vq->in_iov.used);
- return false;
+ goto err;
}
if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
- dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request in header too short\n");
+ goto err;
}
/* The last byte is the status and we checked if the last iov has
@@ -96,8 +115,8 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
sizeof(hdr));
if (bytes != sizeof(hdr)) {
- dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request out header too short\n");
+ goto err;
}
to_pull -= bytes;
@@ -107,12 +126,20 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
offset = sector << SECTOR_SHIFT;
status = VIRTIO_BLK_S_OK;
+ if (type != VIRTIO_BLK_T_IN && type != VIRTIO_BLK_T_OUT &&
+ sector != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "sector must be 0 for %u request - sector: 0x%llx\n",
+ type, sector);
+ status = VIRTIO_BLK_S_IOERR;
+ goto err_status;
+ }
+
switch (type) {
case VIRTIO_BLK_T_IN:
- if (!vdpasim_blk_check_range(sector, to_push)) {
- dev_err(&vdpasim->vdpa.dev,
- "reading over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_push);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_push >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -121,7 +148,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_push);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_push);
status = VIRTIO_BLK_S_IOERR;
@@ -132,10 +159,9 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
break;
case VIRTIO_BLK_T_OUT:
- if (!vdpasim_blk_check_range(sector, to_pull)) {
- dev_err(&vdpasim->vdpa.dev,
- "writing over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_pull);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_pull >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -144,7 +170,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_pull);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_pull);
status = VIRTIO_BLK_S_IOERR;
@@ -157,7 +183,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim_blk_id,
VIRTIO_BLK_ID_BYTES);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd\n", bytes);
status = VIRTIO_BLK_S_IOERR;
break;
@@ -166,13 +192,76 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
pushed += bytes;
break;
+ case VIRTIO_BLK_T_FLUSH:
+ /* nothing to do */
+ break;
+
+ case VIRTIO_BLK_T_DISCARD:
+ case VIRTIO_BLK_T_WRITE_ZEROES: {
+ struct virtio_blk_discard_write_zeroes range;
+ u32 num_sectors, flags;
+
+ if (to_pull != sizeof(range)) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard/write_zeroes header len: 0x%zx [expected: 0x%zx]\n",
+ to_pull, sizeof(range));
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &range,
+ to_pull);
+ if (bytes < 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ sector = le64_to_cpu(range.sector);
+ offset = sector << SECTOR_SHIFT;
+ num_sectors = le32_to_cpu(range.num_sectors);
+ flags = le32_to_cpu(range.flags);
+
+ if (type == VIRTIO_BLK_T_DISCARD && flags != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES &&
+ flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "write_zeroes unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors,
+ VDPASIM_BLK_DWZ_MAX_SECTORS)) {
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ memset(vdpasim->buffer + offset, 0,
+ num_sectors << SECTOR_SHIFT);
+ }
+
+ break;
+ }
default:
- dev_warn(&vdpasim->vdpa.dev,
- "Unsupported request type %d\n", type);
+ dev_dbg(&vdpasim->vdpa.dev,
+ "Unsupported request type %d\n", type);
status = VIRTIO_BLK_S_IOERR;
break;
}
+err_status:
/* If some operations fail, we need to skip the remaining bytes
* to put the status in the last byte
*/
@@ -182,21 +271,25 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
/* Last byte is the status */
bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
if (bytes != 1)
- return false;
+ goto err;
pushed += bytes;
/* Make sure data is written before advancing index */
smp_wmb();
+ handled = true;
+
+err:
vringh_complete_iotlb(&vq->vring, vq->head, pushed);
- return true;
+ return handled;
}
static void vdpasim_blk_work(struct work_struct *work)
{
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ bool reschedule = false;
int i;
spin_lock(&vdpasim->lock);
@@ -204,8 +297,12 @@ static void vdpasim_blk_work(struct work_struct *work)
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
+ if (!vdpasim->running)
+ goto out;
+
for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
+ int reqs = 0;
if (!vq->ready)
continue;
@@ -218,10 +315,18 @@ static void vdpasim_blk_work(struct work_struct *work)
if (vringh_need_notify_iotlb(&vq->vring) > 0)
vringh_notify(&vq->vring);
local_bh_enable();
+
+ if (++reqs > 4) {
+ reschedule = true;
+ break;
+ }
}
}
out:
spin_unlock(&vdpasim->lock);
+
+ if (reschedule)
+ schedule_work(&vdpasim->work);
}
static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
@@ -237,6 +342,17 @@ static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ /* VIRTIO_BLK_F_DISCARD */
+ blk_config->discard_sector_alignment =
+ cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ blk_config->max_discard_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_discard_seg = cpu_to_vdpasim32(vdpasim, 1);
+ /* VIRTIO_BLK_F_WRITE_ZEROES */
+ blk_config->max_write_zeroes_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_write_zeroes_seg = cpu_to_vdpasim32(vdpasim, 1);
+
}
static void vdpasim_blk_mgmtdev_release(struct device *dev)
@@ -260,6 +376,8 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.id = VIRTIO_ID_BLOCK;
dev_attr.supported_features = VDPASIM_BLK_FEATURES;
dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
+ dev_attr.ngroups = VDPASIM_BLK_GROUP_NUM;
+ dev_attr.nas = VDPASIM_BLK_AS_NUM;
dev_attr.config_size = sizeof(struct virtio_blk_config);
dev_attr.get_config = vdpasim_blk_get_config;
dev_attr.work_fn = vdpasim_blk_work;
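Both new request types carry a struct virtio_blk_discard_write_zeroes payload: the handler pulls it from the descriptor chain, checks the flags allowed for each type (discard permits none, write-zeroes permits only the UNMAP flag), range-checks the sectors, and only then touches the backing buffer. A compact sketch of that validation order, simplified to host-endian fields:

#include <stdbool.h>
#include <stdint.h>

#define F_UNMAP	(1u << 0)	/* stand-in for VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP */

struct dwz_range {
	uint64_t sector;
	uint32_t num_sectors;
	uint32_t flags;
};

enum req { REQ_DISCARD, REQ_WRITE_ZEROES };

static bool dwz_valid(enum req type, const struct dwz_range *r,
		      uint64_t capacity)
{
	/* discard takes no flags; write-zeroes may only set UNMAP */
	if (type == REQ_DISCARD && r->flags)
		return false;
	if (type == REQ_WRITE_ZEROES && (r->flags & ~F_UNMAP))
		return false;

	/* the range must fit inside the device */
	return r->sector <= capacity &&
	       r->num_sectors <= capacity - r->sector;
}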
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 5125976a4df8..886449e88502 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -154,6 +154,9 @@ static void vdpasim_net_work(struct work_struct *work)
spin_lock(&vdpasim->lock);
+ if (!vdpasim->running)
+ goto out;
+
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 6daa3978d290..e682bc7ee6c9 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -138,18 +138,17 @@ static void do_bounce(phys_addr_t orig, void *addr, size_t size,
{
unsigned long pfn = PFN_DOWN(orig);
unsigned int offset = offset_in_page(orig);
- char *buffer;
+ struct page *page;
unsigned int sz = 0;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
- buffer = kmap_atomic(pfn_to_page(pfn));
+ page = pfn_to_page(pfn);
if (dir == DMA_TO_DEVICE)
- memcpy(addr, buffer + offset, sz);
+ memcpy_from_page(addr, page, offset, sz);
else
- memcpy(buffer + offset, addr, sz);
- kunmap_atomic(buffer);
+ memcpy_to_page(page, offset, addr, sz);
size -= sz;
pfn++;
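memcpy_from_page()/memcpy_to_page() wrap the map-copy-unmap sequence that do_bounce() used to spell out with kmap_atomic(), now built on kmap_local_page(). Roughly what one direction does under the hood (a sketch, not the exact highmem.h implementation):

#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_from_page(void *to, struct page *page,
				   size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}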
@@ -179,8 +178,9 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->orig_phys == INVALID_PHYS_ADDR))
return;
- addr = page_address(map->bounce_page) + offset;
- do_bounce(map->orig_phys + offset, addr, sz, dir);
+ addr = kmap_local_page(map->bounce_page);
+ do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
+ kunmap_local(addr);
size -= sz;
iova += sz;
}
@@ -213,21 +213,21 @@ vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
struct vduse_bounce_map *map;
struct page *page = NULL;
- spin_lock(&domain->iotlb_lock);
+ read_lock(&domain->bounce_lock);
map = &domain->bounce_maps[iova >> PAGE_SHIFT];
- if (!map->bounce_page)
+ if (domain->user_bounce_pages || !map->bounce_page)
goto out;
page = map->bounce_page;
get_page(page);
out:
- spin_unlock(&domain->iotlb_lock);
+ read_unlock(&domain->bounce_lock);
return page;
}
static void
-vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns;
@@ -247,6 +247,73 @@ vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
}
}
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count)
+{
+ struct vduse_bounce_map *map;
+ int i, ret;
+
+	/* We don't support partial mapping for now */
+ if (count != (domain->bounce_size >> PAGE_SHIFT))
+ return -EINVAL;
+
+ write_lock(&domain->bounce_lock);
+ ret = -EEXIST;
+ if (domain->user_bounce_pages)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ map = &domain->bounce_maps[i];
+ if (map->bounce_page) {
+ /* Copy kernel page to user page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR)
+ memcpy_to_page(pages[i], 0,
+ page_address(map->bounce_page),
+ PAGE_SIZE);
+ __free_page(map->bounce_page);
+ }
+ map->bounce_page = pages[i];
+ get_page(pages[i]);
+ }
+ domain->user_bounce_pages = true;
+ ret = 0;
+out:
+ write_unlock(&domain->bounce_lock);
+
+ return ret;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+{
+ struct vduse_bounce_map *map;
+ unsigned long i, count;
+
+ write_lock(&domain->bounce_lock);
+ if (!domain->user_bounce_pages)
+ goto out;
+
+ count = domain->bounce_size >> PAGE_SHIFT;
+ for (i = 0; i < count; i++) {
+ struct page *page = NULL;
+
+ map = &domain->bounce_maps[i];
+ if (WARN_ON(!map->bounce_page))
+ continue;
+
+ /* Copy user page to kernel page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+ memcpy_from_page(page_address(page),
+ map->bounce_page, 0, PAGE_SIZE);
+ }
+ put_page(map->bounce_page);
+ map->bounce_page = page;
+ }
+ domain->user_bounce_pages = false;
+out:
+ write_unlock(&domain->bounce_lock);
+}
+
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
if (!domain->bounce_map)
@@ -322,13 +389,18 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
if (vduse_domain_init_bounce_map(domain))
goto err;
+ read_lock(&domain->bounce_lock);
if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
- goto err;
+ goto err_unlock;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
+ read_unlock(&domain->bounce_lock);
+
return iova;
+err_unlock:
+ read_unlock(&domain->bounce_lock);
err:
vduse_domain_free_iova(iovad, iova, size);
return DMA_MAPPING_ERROR;
@@ -340,10 +412,12 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
{
struct iova_domain *iovad = &domain->stream_iovad;
+ read_lock(&domain->bounce_lock);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+ read_unlock(&domain->bounce_lock);
vduse_domain_free_iova(iovad, dma_addr, size);
}
@@ -451,7 +525,8 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
spin_lock(&domain->iotlb_lock);
vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
- vduse_domain_free_bounce_pages(domain);
+ vduse_domain_remove_user_bounce_pages(domain);
+ vduse_domain_free_kernel_bounce_pages(domain);
spin_unlock(&domain->iotlb_lock);
put_iova_domain(&domain->stream_iovad);
put_iova_domain(&domain->consistent_iovad);
@@ -511,6 +586,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
goto err_file;
domain->file = file;
+ rwlock_init(&domain->bounce_lock);
spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad,
PAGE_SIZE, IOVA_START_PFN);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 2722d9b8e21a..4e0e50e7ac15 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -14,6 +14,7 @@
#include <linux/iova.h>
#include <linux/dma-mapping.h>
#include <linux/vhost_iotlb.h>
+#include <linux/rwlock.h>
#define IOVA_START_PFN 1
@@ -34,6 +35,8 @@ struct vduse_iova_domain {
struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
struct file *file;
+ bool user_bounce_pages;
+ rwlock_t bounce_lock;
};
int vduse_domain_set_map(struct vduse_iova_domain *domain,
@@ -61,6 +64,11 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count);
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+
void vduse_domain_destroy(struct vduse_iova_domain *domain);
struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 3bc27de58f46..41c0b29739f1 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -21,6 +21,8 @@
#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/virtio_config.h>
@@ -64,6 +66,13 @@ struct vduse_vdpa {
struct vduse_dev *dev;
};
+struct vduse_umem {
+ unsigned long iova;
+ unsigned long npages;
+ struct page **pages;
+ struct mm_struct *mm;
+};
+
struct vduse_dev {
struct vduse_vdpa *vdev;
struct device *dev;
@@ -95,6 +104,8 @@ struct vduse_dev {
u8 status;
u32 vq_num;
u32 vq_align;
+ struct vduse_umem *umem;
+ struct mutex mem_lock;
};
struct vduse_dev_msg {
@@ -917,6 +928,102 @@ unlock:
return ret;
}
+static int vduse_dev_dereg_umem(struct vduse_dev *dev,
+ u64 iova, u64 size)
+{
+ int ret;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -ENOENT;
+ if (!dev->umem)
+ goto unlock;
+
+ ret = -EINVAL;
+ if (dev->umem->iova != iova || size != dev->domain->bounce_size)
+ goto unlock;
+
+ vduse_domain_remove_user_bounce_pages(dev->domain);
+ unpin_user_pages_dirty_lock(dev->umem->pages,
+ dev->umem->npages, true);
+ atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
+ mmdrop(dev->umem->mm);
+ vfree(dev->umem->pages);
+ kfree(dev->umem);
+ dev->umem = NULL;
+ ret = 0;
+unlock:
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
+static int vduse_dev_reg_umem(struct vduse_dev *dev,
+ u64 iova, u64 uaddr, u64 size)
+{
+ struct page **page_list = NULL;
+ struct vduse_umem *umem = NULL;
+ long pinned = 0;
+ unsigned long npages, lock_limit;
+ int ret;
+
+ if (!dev->domain->bounce_map ||
+ size != dev->domain->bounce_size ||
+ iova != 0 || uaddr & ~PAGE_MASK)
+ return -EINVAL;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -EEXIST;
+ if (dev->umem)
+ goto unlock;
+
+ ret = -ENOMEM;
+ npages = size >> PAGE_SHIFT;
+ page_list = __vmalloc(array_size(npages, sizeof(struct page *)),
+ GFP_KERNEL_ACCOUNT);
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!page_list || !umem)
+ goto unlock;
+
+ mmap_read_lock(current->mm);
+
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
+ if (npages + atomic64_read(&current->mm->pinned_vm) > lock_limit)
+ goto out;
+
+ pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE,
+ page_list, NULL);
+ if (pinned != npages) {
+ ret = pinned < 0 ? pinned : -ENOMEM;
+ goto out;
+ }
+
+ ret = vduse_domain_add_user_bounce_pages(dev->domain,
+ page_list, pinned);
+ if (ret)
+ goto out;
+
+ atomic64_add(npages, &current->mm->pinned_vm);
+
+ umem->pages = page_list;
+ umem->npages = pinned;
+ umem->iova = iova;
+ umem->mm = current->mm;
+ mmgrab(current->mm);
+
+ dev->umem = umem;
+out:
+ if (ret && pinned > 0)
+ unpin_user_pages(page_list, pinned);
+
+ mmap_read_unlock(current->mm);
+unlock:
+ if (ret) {
+ vfree(page_list);
+ kfree(umem);
+ }
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
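To put the new ioctls in context, a userspace VDUSE daemon would query the bounce region and, if VDUSE_IOVA_CAP_UMEM is advertised, hand the kernel a page-aligned buffer to replace the kernel bounce pages. A hedged sketch follows (dev_fd and buf are assumed to be an open VDUSE device fd and a suitably sized page-aligned allocation; error handling trimmed):

        struct vduse_iova_info info = { .start = 0, .last = UINT64_MAX };

        if (ioctl(dev_fd, VDUSE_IOTLB_GET_INFO, &info) == 0 &&
            (info.capability & VDUSE_IOVA_CAP_UMEM)) {
                struct vduse_iova_umem umem = {
                        .uaddr = (uintptr_t)buf,
                        .iova  = info.start,    /* must cover the whole bounce region */
                        .size  = info.last - info.start + 1,
                };

                if (ioctl(dev_fd, VDUSE_IOTLB_REG_UMEM, &umem) < 0)
                        perror("VDUSE_IOTLB_REG_UMEM");
        }

Note that per vduse_dev_reg_umem() above, a partial mapping is rejected with -EINVAL and a second registration fails with -EEXIST.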
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1089,6 +1196,77 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
break;
}
+ case VDUSE_IOTLB_REG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_reg_umem(dev, umem.iova,
+ umem.uaddr, umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_DEREG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_dereg_umem(dev, umem.iova,
+ umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_GET_INFO: {
+ struct vduse_iova_info info;
+ struct vhost_iotlb_map *map;
+ struct vduse_iova_domain *domain = dev->domain;
+
+ ret = -EFAULT;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ break;
+
+ ret = -EINVAL;
+ if (info.start > info.last)
+ break;
+
+ if (!is_mem_zero((const char *)info.reserved,
+ sizeof(info.reserved)))
+ break;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb,
+ info.start, info.last);
+ if (map) {
+ info.start = map->start;
+ info.last = map->last;
+ info.capability = 0;
+ if (domain->bounce_map && map->start == 0 &&
+ map->last == domain->bounce_size - 1)
+ info.capability |= VDUSE_IOVA_CAP_UMEM;
+ }
+ spin_unlock(&domain->iotlb_lock);
+ if (!map)
+ break;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &info, sizeof(info)))
+ break;
+
+ ret = 0;
+ break;
+ }
default:
ret = -ENOIOCTLCMD;
break;
@@ -1101,6 +1279,7 @@ static int vduse_dev_release(struct inode *inode, struct file *file)
{
struct vduse_dev *dev = file->private_data;
+ vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
spin_lock(&dev->msg_lock);
/* Make sure the inflight messages can be processed after reconnection */
list_splice_init(&dev->recv_list, &dev->send_list);
@@ -1163,6 +1342,7 @@ static struct vduse_dev *vduse_dev_create(void)
return NULL;
mutex_init(&dev->lock);
+ mutex_init(&dev->mem_lock);
spin_lock_init(&dev->msg_lock);
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index fee73f3d9480..1a32357592e3 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
vfio_virqfd-y := virqfd.o
+vfio-y += vfio_main.o
+
obj-$(CONFIG_VFIO) += vfio.o
obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
index 4ad63ececb91..7a29f572f93d 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
@@ -39,7 +39,7 @@ struct vfio_fsl_mc_device {
struct vfio_fsl_mc_irq *mc_irqs;
};
-extern int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
u32 flags, unsigned int index,
unsigned int start, unsigned int count,
void *data);
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 4def43f5f7b6..ea762e28c1cc 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1185,7 +1185,7 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
if (ret)
return ret;
- if (core_vdev->ops->migration_set_state) {
+ if (core_vdev->mig_ops) {
ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
if (ret) {
vfio_pci_core_disable(vdev);
@@ -1208,6 +1208,11 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
+ .migration_set_state = hisi_acc_vfio_pci_set_device_state,
+ .migration_get_state = hisi_acc_vfio_pci_get_device_state,
+};
+
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.name = "hisi-acc-vfio-pci-migration",
.open_device = hisi_acc_vfio_pci_open_device,
@@ -1219,8 +1224,6 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.mmap = hisi_acc_vfio_pci_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = hisi_acc_vfio_pci_set_device_state,
- .migration_get_state = hisi_acc_vfio_pci_get_device_state,
};
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
@@ -1272,6 +1275,8 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device
if (!ret) {
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
&hisi_acc_vfio_pci_migrn_ops);
+ hisi_acc_vdev->core_device.vdev.mig_ops =
+ &hisi_acc_vfio_pci_migrn_state_ops;
} else {
pci_warn(pdev, "migration support failed, continue with generic interface\n");
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 9b9f33ca270a..dd5d7bfe0a49 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -88,6 +88,16 @@ static int mlx5fv_vf_event(struct notifier_block *nb,
return 0;
}
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->migrate_cap)
+ return;
+
+ mutex_lock(&mvdev->state_mutex);
+ mlx5vf_disable_fds(mvdev);
+ mlx5vf_state_mutex_unlock(mvdev);
+}
+
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{
if (!mvdev->migrate_cap)
@@ -98,7 +108,8 @@ void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
destroy_workqueue(mvdev->cb_wq);
}
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops)
{
struct pci_dev *pdev = mvdev->core_device.pdev;
int ret;
@@ -139,6 +150,7 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
mvdev->core_device.vdev.migration_flags =
VFIO_MIGRATION_STOP_COPY |
VFIO_MIGRATION_P2P;
+ mvdev->core_device.vdev.mig_ops = mig_ops;
end:
mlx5_vf_put_core_dev(mvdev->mdev);
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 6c3112fdd8b1..8208f4701a90 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -62,8 +62,10 @@ int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size);
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 0558d0649ddb..a9b63d15c5d3 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -570,10 +570,15 @@ static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
struct mlx5vf_pci_core_device *mvdev = container_of(
core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_cmd_close_migratable(mvdev);
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
+ .migration_set_state = mlx5vf_pci_set_device_state,
+ .migration_get_state = mlx5vf_pci_get_device_state,
+};
+
static const struct vfio_device_ops mlx5vf_pci_ops = {
.name = "mlx5-vfio-pci",
.open_device = mlx5vf_pci_open_device,
@@ -585,8 +590,6 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = mlx5vf_pci_set_device_state,
- .migration_get_state = mlx5vf_pci_get_device_state,
};
static int mlx5vf_pci_probe(struct pci_dev *pdev,
@@ -599,7 +602,7 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
if (!mvdev)
return -ENOMEM;
vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
- mlx5vf_cmd_set_migratable(mvdev);
+ mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops);
dev_set_drvdata(&pdev->dev, &mvdev->core_device);
ret = vfio_pci_core_register_device(&mvdev->core_device);
if (ret)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 9343f597182d..442d3ba4122b 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -222,7 +222,7 @@ static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
memcpy(vdev->vconfig + pos, &virt_val, count);
}
- /* Non-virtualzed and writable bits go to hardware */
+ /* Non-virtualized and writable bits go to hardware */
if (write & ~virt) {
struct pci_dev *pdev = vdev->pdev;
__le32 phys_val = 0;
@@ -1728,7 +1728,7 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
/*
* Config space, caps and ecaps are all dword aligned, so we could
* use one byte per dword to record the type. However, there are
- * no requiremenst on the length of a capability, so the gap between
+ * no requirements on the length of a capability, so the gap between
* capabilities needs byte granularity.
*/
map = kmalloc(pdev->cfg_size, GFP_KERNEL);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 0d8d4cdfb2f0..c8d3b0450fb3 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1868,6 +1868,13 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
+ if (vdev->vdev.mig_ops) {
+ if (!(vdev->vdev.mig_ops->migration_get_state &&
+ vdev->vdev.mig_ops->migration_set_state) ||
+ !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
+ return -EINVAL;
+ }
+
/*
* Prevent binding to PFs with VFs enabled, the VFs might be in use
* by the host or other users. We cannot capture the VFs if they
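
The check above spells out the new contract: a driver that sets vdev.mig_ops must provide both state callbacks and advertise at least VFIO_MIGRATION_STOP_COPY. Mirroring the hisi_acc and mlx5 conversions in this series, the minimal wiring looks roughly like this (my_* names hypothetical):

        static const struct vfio_migration_ops my_mig_ops = {
                .migration_set_state = my_set_device_state,
                .migration_get_state = my_get_device_state,
        };

        /* at probe time, before vfio_pci_core_register_device(): */
        my_vdev->core_device.vdev.mig_ops = &my_mig_ops;
        my_vdev->core_device.vdev.migration_flags = VFIO_MIGRATION_STOP_COPY;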
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index 520d2a8e8375..691b43f4b2b2 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -78,21 +78,20 @@ struct vfio_platform_reset_node {
vfio_platform_reset_fn_t of_reset;
};
-extern int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev);
+int vfio_platform_probe_common(struct vfio_platform_device *vdev,
+ struct device *dev);
void vfio_platform_remove_common(struct vfio_platform_device *vdev);
-extern int vfio_platform_irq_init(struct vfio_platform_device *vdev);
-extern void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
+int vfio_platform_irq_init(struct vfio_platform_device *vdev);
+void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
-extern int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
- uint32_t flags, unsigned index,
- unsigned start, unsigned count,
- void *data);
+int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ uint32_t flags, unsigned index,
+ unsigned start, unsigned count, void *data);
-extern void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
-extern void vfio_platform_unregister_reset(const char *compat,
- vfio_platform_reset_fn_t fn);
+void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
+void vfio_platform_unregister_reset(const char *compat,
+ vfio_platform_reset_fn_t fn);
#define vfio_platform_register_reset(__compat, __reset) \
static struct vfio_platform_reset_node __reset ## _node = { \
.owner = THIS_MODULE, \
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index a67130221151..503bea6c843d 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -50,16 +50,15 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
int (*pin_pages)(void *iommu_data,
struct iommu_group *group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn);
- int (*unpin_pages)(void *iommu_data,
- unsigned long *user_pfn, int npage);
- int (*register_notifier)(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb);
- int (*unregister_notifier)(void *iommu_data,
- struct notifier_block *nb);
+ struct page **pages);
+ void (*unpin_pages)(void *iommu_data,
+ dma_addr_t user_iova, int npage);
+ void (*register_device)(void *iommu_data,
+ struct vfio_device *vdev);
+ void (*unregister_device)(void *iommu_data,
+ struct vfio_device *vdev);
int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
void *data, size_t count, bool write);
struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 708a95e61831..169f07ac162d 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -378,8 +378,7 @@ static void tce_iommu_release(void *iommu_data)
kfree(container);
}
-static void tce_iommu_unuse_page(struct tce_container *container,
- unsigned long hpa)
+static void tce_iommu_unuse_page(unsigned long hpa)
{
struct page *page;
@@ -474,7 +473,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
}
- tce_iommu_unuse_page(container, oldhpa);
+ tce_iommu_unuse_page(oldhpa);
}
iommu_tce_kill(tbl, firstentry, pages);
@@ -524,7 +523,7 @@ static long tce_iommu_build(struct tce_container *container,
ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
&hpa, &dirtmp);
if (ret) {
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -532,7 +531,7 @@ static long tce_iommu_build(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
tce += IOMMU_PAGE_SIZE(tbl);
}
@@ -1266,7 +1265,10 @@ static int tce_iommu_attach_group(void *iommu_data,
goto unlock_exit;
}
- /* Check if new group has the same iommu_ops (i.e. compatible) */
+ /*
+ * Check if new group has the same iommu_table_group_ops
+ * (i.e. compatible)
+ */
list_for_each_entry(tcegrp, &container->group_list, next) {
struct iommu_table_group *table_group_tmp;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c13b9290e357..db516c90a977 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -67,7 +67,8 @@ struct vfio_iommu {
struct list_head iova_list;
struct mutex lock;
struct rb_root dma_list;
- struct blocking_notifier_head notifier;
+ struct list_head device_list;
+ struct mutex device_list_lock;
unsigned int dma_avail;
unsigned int vaddr_invalid_count;
uint64_t pgsize_bitmap;
@@ -828,9 +829,9 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_iommu_type1_pin_pages(void *iommu_data,
struct iommu_group *iommu_group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn)
+ struct page **pages)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
@@ -840,7 +841,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
bool do_accounting;
dma_addr_t iova;
- if (!iommu || !user_pfn || !phys_pfn)
+ if (!iommu || !pages)
return -EINVAL;
/* Supported for v2 version only */
@@ -856,7 +857,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
again:
if (iommu->vaddr_invalid_count) {
for (i = 0; i < npage; i++) {
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma);
if (ret < 0)
goto pin_done;
@@ -865,8 +866,8 @@ again:
}
}
- /* Fail if notifier list is empty */
- if (!iommu->notifier.head) {
+ /* Fail if no dma_umap notifier is registered */
+ if (list_empty(&iommu->device_list)) {
ret = -EINVAL;
goto pin_done;
}
@@ -879,9 +880,10 @@ again:
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ unsigned long phys_pfn;
struct vfio_pfn *vpfn;
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma) {
ret = -EINVAL;
@@ -895,23 +897,25 @@ again:
vpfn = vfio_iova_get_vfio_pfn(dma, iova);
if (vpfn) {
- phys_pfn[i] = vpfn->pfn;
+ pages[i] = pfn_to_page(vpfn->pfn);
continue;
}
remote_vaddr = dma->vaddr + (iova - dma->iova);
- ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
+ ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn,
do_accounting);
if (ret)
goto pin_unwind;
- ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+ ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
if (ret) {
- if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
+ if (put_pfn(phys_pfn, dma->prot) && do_accounting)
vfio_lock_acct(dma, -1, true);
goto pin_unwind;
}
+ pages[i] = pfn_to_page(phys_pfn);
+
if (iommu->dirty_page_tracking) {
unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
@@ -934,43 +938,38 @@ again:
goto pin_done;
pin_unwind:
- phys_pfn[i] = 0;
+ pages[i] = NULL;
for (j = 0; j < i; j++) {
dma_addr_t iova;
- iova = user_pfn[j] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * j;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
vfio_unpin_page_external(dma, iova, do_accounting);
- phys_pfn[j] = 0;
+ pages[j] = NULL;
}
pin_done:
mutex_unlock(&iommu->lock);
return ret;
}
-static int vfio_iommu_type1_unpin_pages(void *iommu_data,
- unsigned long *user_pfn,
- int npage)
+static void vfio_iommu_type1_unpin_pages(void *iommu_data,
+ dma_addr_t user_iova, int npage)
{
struct vfio_iommu *iommu = iommu_data;
bool do_accounting;
int i;
- if (!iommu || !user_pfn || npage <= 0)
- return -EINVAL;
-
/* Supported for v2 version only */
- if (!iommu->v2)
- return -EACCES;
+ if (WARN_ON(!iommu->v2))
+ return;
mutex_lock(&iommu->lock);
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ dma_addr_t iova = user_iova + PAGE_SIZE * i;
struct vfio_dma *dma;
- dma_addr_t iova;
- iova = user_pfn[i] << PAGE_SHIFT;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma)
break;
@@ -979,7 +978,8 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
}
mutex_unlock(&iommu->lock);
- return i > 0 ? i : -EINVAL;
+
+ WARN_ON(i != npage);
}
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
@@ -1287,6 +1287,35 @@ static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
return 0;
}
+/*
+ * Notify VFIO drivers using vfio_register_emulated_iommu_dev() to invalidate
+ * and unmap iovas within the range we're about to unmap. Drivers MUST unpin
+ * pages in response to an invalidation.
+ */
+static void vfio_notify_dma_unmap(struct vfio_iommu *iommu,
+ struct vfio_dma *dma)
+{
+ struct vfio_device *device;
+
+ if (list_empty(&iommu->device_list))
+ return;
+
+ /*
+ * The device is expected to call vfio_unpin_pages() for any IOVA it has
+ * pinned within the range. Since vfio_unpin_pages() will eventually
+	 * call back down to this code and try to obtain the iommu->lock, we must
+ * drop it.
+ */
+ mutex_lock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
+
+ list_for_each_entry(device, &iommu->device_list, iommu_entry)
+ device->ops->dma_unmap(device, dma->iova, dma->size);
+
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_lock(&iommu->lock);
+}
+
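With the notifier chain gone, an emulated-IOMMU driver opts into these callbacks simply by implementing ->dma_unmap() in its vfio_device_ops; registration happens automatically at open time (see the vfio_main.c changes below). A hedged sketch, with my_* names hypothetical:

        static void my_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
        {
                /*
                 * Invalidate shadow mappings, then drop exactly the pins the
                 * driver holds inside [iova, iova + length).
                 */
        }

        static const struct vfio_device_ops my_vfio_ops = {
                /* open_device, close_device, ioctl, etc. elided */
                .dma_unmap = my_dma_unmap,
        };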
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_unmap *unmap,
struct vfio_bitmap *bitmap)
@@ -1377,12 +1406,6 @@ again:
if (!iommu->v2 && iova > dma->iova)
break;
- /*
- * Task with same address space who mapped this iova range is
- * allowed to unmap the iova range.
- */
- if (dma->task->mm != current->mm)
- break;
if (invalidate_vaddr) {
if (dma->vaddr_invalid) {
@@ -1406,8 +1429,6 @@ again:
}
if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
- struct vfio_iommu_type1_dma_unmap nb_unmap;
-
if (dma_last == dma) {
BUG_ON(++retries > 10);
} else {
@@ -1415,20 +1436,7 @@ again:
retries = 0;
}
- nb_unmap.iova = dma->iova;
- nb_unmap.size = dma->size;
-
- /*
- * Notify anyone (mdev vendor drivers) to invalidate and
- * unmap iovas within the range we're about to unmap.
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- mutex_unlock(&iommu->lock);
- blocking_notifier_call_chain(&iommu->notifier,
- VFIO_IOMMU_NOTIFY_DMA_UNMAP,
- &nb_unmap);
- mutex_lock(&iommu->lock);
+ vfio_notify_dma_unmap(iommu, dma);
goto again;
}
@@ -1679,18 +1687,6 @@ out_unlock:
return ret;
}
-static int vfio_bus_type(struct device *dev, void *data)
-{
- struct bus_type **bus = data;
-
- if (*bus && *bus != dev->bus)
- return -EINVAL;
-
- *bus = dev->bus;
-
- return 0;
-}
-
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
@@ -2153,13 +2149,26 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
list_splice_tail(iova_copy, iova);
}
+/* Redundantly walks non-present capabilities to simplify caller */
+static int vfio_iommu_device_capable(struct device *dev, void *data)
+{
+ return device_iommu_capable(dev, (enum iommu_cap)data);
+}
+
+static int vfio_iommu_domain_alloc(struct device *dev, void *data)
+{
+ struct iommu_domain **domain = data;
+
+ *domain = iommu_domain_alloc(dev->bus);
+ return 1; /* Don't iterate */
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group, enum vfio_group_type type)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
- struct bus_type *bus = NULL;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry *geo;
@@ -2192,18 +2201,19 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_unlock;
}
- /* Determine bus_type in order to allocate a domain */
- ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
- if (ret)
- goto out_free_group;
-
ret = -ENOMEM;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
goto out_free_group;
+ /*
+ * Going via the iommu_group iterator avoids races, and trivially gives
+ * us a representative device for the IOMMU API call. We don't actually
+ * want to iterate beyond the first device (if any).
+ */
ret = -EIO;
- domain->domain = iommu_domain_alloc(bus);
+ iommu_group_for_each_dev(iommu_group, &domain->domain,
+ vfio_iommu_domain_alloc);
if (!domain->domain)
goto out_free_domain;
@@ -2258,7 +2268,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_add(&group->next, &domain->group_list);
msi_remap = irq_domain_check_msi_remap() ||
- iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+ iommu_group_for_each_dev(iommu_group, (void *)IOMMU_CAP_INTR_REMAP,
+ vfio_iommu_device_capable);
if (!allow_unsafe_interrupts && !msi_remap) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
@@ -2478,7 +2489,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&iommu->emulated_iommu_groups) &&
list_empty(&iommu->domain_list)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(&iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
}
goto detach_group_done;
@@ -2510,7 +2521,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&domain->group_list)) {
if (list_is_singular(&iommu->domain_list)) {
if (list_empty(&iommu->emulated_iommu_groups)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(
+ &iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
} else {
vfio_iommu_unmap_unpin_reaccount(iommu);
@@ -2571,7 +2583,8 @@ static void *vfio_iommu_type1_open(unsigned long arg)
iommu->dma_avail = dma_entry_limit;
iommu->container_open = true;
mutex_init(&iommu->lock);
- BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+ mutex_init(&iommu->device_list_lock);
+ INIT_LIST_HEAD(&iommu->device_list);
init_waitqueue_head(&iommu->vaddr_wait);
iommu->pgsize_bitmap = PAGE_MASK;
INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
@@ -3008,28 +3021,40 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
}
}
-static int vfio_iommu_type1_register_notifier(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb)
+static void vfio_iommu_type1_register_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- /* clear known events */
- *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-
- /* refuse to register if still events remaining */
- if (*events)
- return -EINVAL;
+ if (!vdev->ops->dma_unmap)
+ return;
- return blocking_notifier_chain_register(&iommu->notifier, nb);
+ /*
+	 * list_empty(&iommu->device_list) is tested under the iommu->lock, while
+	 * iteration over the list for dma_unmap must be done under the
+	 * device_list_lock. Holding both locks here allows avoiding the
+	 * device_list_lock in several fast paths. See vfio_notify_dma_unmap().
+ */
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_add(&vdev->iommu_entry, &iommu->device_list);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
-static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
- struct notifier_block *nb)
+static void vfio_iommu_type1_unregister_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- return blocking_notifier_chain_unregister(&iommu->notifier, nb);
+ if (!vdev->ops->dma_unmap)
+ return;
+
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_del(&vdev->iommu_entry);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
@@ -3163,8 +3188,8 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.detach_group = vfio_iommu_type1_detach_group,
.pin_pages = vfio_iommu_type1_pin_pages,
.unpin_pages = vfio_iommu_type1_unpin_pages,
- .register_notifier = vfio_iommu_type1_register_notifier,
- .unregister_notifier = vfio_iommu_type1_unregister_notifier,
+ .register_device = vfio_iommu_type1_register_device,
+ .unregister_device = vfio_iommu_type1_unregister_device,
.dma_rw = vfio_iommu_type1_dma_rw,
.group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
.notify = vfio_iommu_type1_notify,
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio_main.c
index 521a3eabf0e1..7cb56c382c97 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio_main.c
@@ -231,6 +231,9 @@ int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
struct vfio_iommu_driver *driver, *tmp;
+ if (WARN_ON(!ops->register_device != !ops->unregister_device))
+ return -EINVAL;
+
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return -ENOMEM;
@@ -504,7 +507,9 @@ static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
if (IS_ERR(iommu_group))
return ERR_CAST(iommu_group);
- iommu_group_set_name(iommu_group, "vfio-noiommu");
+ ret = iommu_group_set_name(iommu_group, "vfio-noiommu");
+ if (ret)
+ goto out_put_group;
ret = iommu_group_add_device(iommu_group, dev);
if (ret)
goto out_put_group;
@@ -554,7 +559,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
* restore cache coherency. It has to be checked here because it is only
* valid for cases where we are using iommu groups.
*/
- if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
iommu_group_put(iommu_group);
return ERR_PTR(-EINVAL);
}
@@ -1082,6 +1087,7 @@ static void vfio_device_unassign_container(struct vfio_device *device)
static struct file *vfio_device_open(struct vfio_device *device)
{
+ struct vfio_iommu_driver *iommu_driver;
struct file *filep;
int ret;
@@ -1112,6 +1118,12 @@ static struct file *vfio_device_open(struct vfio_device *device)
if (ret)
goto err_undo_count;
}
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->register_device)
+ iommu_driver->ops->register_device(
+ device->group->container->iommu_data, device);
+
up_read(&device->group->group_rwsem);
}
mutex_unlock(&device->dev_set->lock);
@@ -1146,13 +1158,19 @@ static struct file *vfio_device_open(struct vfio_device *device)
err_close_device:
mutex_lock(&device->dev_set->lock);
down_read(&device->group->group_rwsem);
- if (device->open_count == 1 && device->ops->close_device)
+ if (device->open_count == 1 && device->ops->close_device) {
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
+ }
err_undo_count:
+ up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0 && device->kvm)
device->kvm = NULL;
- up_read(&device->group->group_rwsem);
mutex_unlock(&device->dev_set->lock);
module_put(device->dev->driver->owner);
err_unassign_container:
@@ -1342,12 +1360,18 @@ static const struct file_operations vfio_group_fops = {
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_device *device = filep->private_data;
+ struct vfio_iommu_driver *iommu_driver;
mutex_lock(&device->dev_set->lock);
vfio_assert_device_open(device);
down_read(&device->group->group_rwsem);
if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0)
@@ -1544,8 +1568,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
struct file *filp = NULL;
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz,
@@ -1561,7 +1584,8 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
if (flags & VFIO_DEVICE_FEATURE_GET) {
enum vfio_device_mig_state curr_state;
- ret = device->ops->migration_get_state(device, &curr_state);
+ ret = device->mig_ops->migration_get_state(device,
+ &curr_state);
if (ret)
return ret;
mig.device_state = curr_state;
@@ -1569,7 +1593,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
}
/* Handle the VFIO_DEVICE_FEATURE_SET */
- filp = device->ops->migration_set_state(device, mig.device_state);
+ filp = device->mig_ops->migration_set_state(device, mig.device_state);
if (IS_ERR(filp) || !filp)
goto out_copy;
@@ -1592,8 +1616,7 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
};
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
@@ -1815,6 +1838,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
if (!buf) {
kfree(caps->buf);
+ caps->buf = NULL;
caps->size = 0;
return ERR_PTR(-ENOMEM);
}
@@ -1913,26 +1937,25 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
- * Pin a set of guest PFNs and return their associated host PFNs for local
+ * Pin contiguous user pages and return their associated host pages for local
* domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be pinned.
- * @npage [in] : count of elements in user_pfn array. This count should not
- * be greater VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @iova [in] : starting IOVA of user pages to be pinned.
+ * @npage [in] : count of pages to be pinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
* @prot [in] : protection flags
- * @phys_pfn[out]: array of host PFNs
+ * @pages[out] : array of host pages
* Return error or number of pages pinned.
*/
-int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn)
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages)
{
struct vfio_container *container;
struct vfio_group *group = device->group;
struct vfio_iommu_driver *driver;
int ret;
- if (!user_pfn || !phys_pfn || !npage ||
- !vfio_assert_device_open(device))
+ if (!pages || !npage || !vfio_assert_device_open(device))
return -EINVAL;
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
@@ -1946,8 +1969,8 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
ret = driver->ops->pin_pages(container->iommu_data,
- group->iommu_group, user_pfn,
- npage, prot, phys_pfn);
+ group->iommu_group, iova,
+ npage, prot, pages);
else
ret = -ENOTTY;
@@ -1956,37 +1979,28 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
EXPORT_SYMBOL(vfio_pin_pages);
/*
- * Unpin set of host PFNs for local domain only.
+ * Unpin contiguous host pages for local domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
- * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * @npage [in] : count of elements in user_pfn array. This count should not
+ * @iova [in] : starting address of user pages to be unpinned.
+ * @npage [in] : count of pages to be unpinned. This count should not
* be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * Return error or number of pages unpinned.
*/
-int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage)
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
struct vfio_container *container;
struct vfio_iommu_driver *driver;
- int ret;
- if (!user_pfn || !npage || !vfio_assert_device_open(device))
- return -EINVAL;
+ if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
+ return;
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
+ if (WARN_ON(!vfio_assert_device_open(device)))
+ return;
/* group->container cannot change while a vfio device is open */
container = device->group->container;
driver = container->iommu_driver;
- if (likely(driver && driver->ops->unpin_pages))
- ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
- npage);
- else
- ret = -ENOTTY;
- return ret;
+ driver->ops->unpin_pages(container->iommu_data, iova, npage);
}
EXPORT_SYMBOL(vfio_unpin_pages);
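
Under the reworked interface a caller passes a base IOVA plus a page count and receives struct page pointers back, and unpinning can no longer fail. A hedged driver-side sketch:

        struct page *page;
        int ret;

        ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* access the guest page, e.g. via kmap_local_page(page) */

        vfio_unpin_pages(vdev, iova, 1);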
@@ -2001,13 +2015,13 @@ EXPORT_SYMBOL(vfio_unpin_pages);
* not a real device DMA, it is not necessary to pin the user space memory.
*
* @device [in] : VFIO device
- * @user_iova [in] : base IOVA of a user space buffer
+ * @iova [in] : base IOVA of a user space buffer
* @data [in] : pointer to kernel buffer
* @len [in] : kernel buffer length
* @write : indicate read or write
* Return error code on failure or 0 on success.
*/
-int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
size_t len, bool write)
{
struct vfio_container *container;
@@ -2023,97 +2037,13 @@ int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
if (likely(driver && driver->ops->dma_rw))
ret = driver->ops->dma_rw(container->iommu_data,
- user_iova, data, len, write);
+ iova, data, len, write);
else
ret = -ENOTTY;
return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
-static int vfio_register_iommu_notifier(struct vfio_group *group,
- unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->register_notifier))
- ret = driver->ops->register_notifier(container->iommu_data,
- events, nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-static int vfio_unregister_iommu_notifier(struct vfio_group *group,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->unregister_notifier))
- ret = driver->ops->unregister_notifier(container->iommu_data,
- nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-int vfio_register_notifier(struct vfio_device *device,
- enum vfio_notify_type type, unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !events || (*events == 0) ||
- !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_register_iommu_notifier(group, events, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_register_notifier);
-
-int vfio_unregister_notifier(struct vfio_device *device,
- enum vfio_notify_type type,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_unregister_iommu_notifier(group, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_unregister_notifier);
-
/*
* Module/class support
*/
@@ -2159,13 +2089,17 @@ static int __init vfio_init(void)
if (ret)
goto err_alloc_chrdev;
- pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
#ifdef CONFIG_VFIO_NOIOMMU
- vfio_register_iommu_driver(&vfio_noiommu_ops);
+ ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
+ if (ret)
+ goto err_driver_register;
+
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
+err_driver_register:
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ffd9e6c2ffc1..7ebf106d50c1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -159,9 +159,13 @@ enum {
};
#define VHOST_SCSI_MAX_TARGET 256
-#define VHOST_SCSI_MAX_VQ 128
+#define VHOST_SCSI_MAX_IO_VQ 1024
#define VHOST_SCSI_MAX_EVENT 128
+static unsigned vhost_scsi_max_io_vqs = 128;
+module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
+MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
+
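The default of 128 preserves the old fixed VHOST_SCSI_MAX_VQ sizing; a host that needs more (or fewer) I/O virtqueues can size it at module load time, e.g.:

        modprobe vhost_scsi max_io_vqs=16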
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq;
/*
@@ -186,7 +190,9 @@ struct vhost_scsi {
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev;
- struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+ struct vhost_scsi_virtqueue *vqs;
+ unsigned long *compl_bitmap;
+ struct vhost_scsi_inflight **old_inflight;
struct vhost_work vs_completion_work; /* cmd completion work item */
struct llist_head vs_completion_list; /* cmd completion queue */
@@ -245,7 +251,7 @@ static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
struct vhost_virtqueue *vq;
int idx, i;
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
@@ -533,7 +539,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_completion_work);
- DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
struct virtio_scsi_cmd_resp v_rsp;
struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
@@ -541,7 +546,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
struct iov_iter iov_iter;
int ret, vq;
- bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+ bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
llnode = llist_del_all(&vs->vs_completion_list);
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -566,7 +571,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
vq = q - vs->vqs;
- __set_bit(vq, signal);
+ __set_bit(vq, vs->compl_bitmap);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -574,8 +579,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
vq = -1;
- while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
- < VHOST_SCSI_MAX_VQ)
+ while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
+ < vs->dev.nvqs)
vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
@@ -643,14 +648,12 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
size_t offset;
unsigned int npages = 0;
- bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+ bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
if (bytes <= 0)
return bytes < 0 ? bytes : -EFAULT;
- iov_iter_advance(iter, bytes);
-
while (bytes) {
unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
sg_set_page(sg++, pages[npages++], n, offset);
@@ -1421,26 +1424,25 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
- struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
int i;
/* Init new inflight and remember the old inflight */
- vhost_scsi_init_inflight(vs, old_inflight);
+ vhost_scsi_init_inflight(vs, vs->old_inflight);
/*
* The inflight->kref was initialized to 1. We decrement it here to
* indicate the start of the flush operation so that it will reach 0
* when all the reqs are finished.
*/
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- wait_for_completion(&old_inflight[i]->comp);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ wait_for_completion(&vs->old_inflight[i]->comp);
}
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
@@ -1603,7 +1605,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
if (!vhost_vq_is_setup(vq))
continue;
@@ -1613,7 +1615,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
goto destroy_vq_cmds;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, vs_tpg);
@@ -1715,7 +1717,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
target_undepend_item(&se_tpg->tpg_group.cg_item);
}
if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
@@ -1724,7 +1726,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
/* Make sure cmds are not running before tearing them down. */
vhost_scsi_flush(vs);
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
@@ -1764,7 +1766,7 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
@@ -1778,16 +1780,40 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
- int r = -ENOMEM, i;
+ int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
goto err_vs;
- vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
- if (!vqs)
+ if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
+ pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
+ VHOST_SCSI_MAX_IO_VQ);
+ nvqs = VHOST_SCSI_MAX_IO_VQ;
+ } else if (nvqs == 0) {
+ pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
+ nvqs = 1;
+ }
+ nvqs += VHOST_SCSI_VQ_IO;
+
+ vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
+ if (!vs->compl_bitmap)
+ goto err_compl_bitmap;
+
+ vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->old_inflight)
+ goto err_inflight;
+
+ vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->vqs)
goto err_vqs;
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_local_vqs;
+
vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
@@ -1798,11 +1824,11 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+ vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
VHOST_SCSI_WEIGHT, 0, true, NULL);
vhost_scsi_init_inflight(vs, NULL);
@@ -1810,7 +1836,13 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
f->private_data = vs;
return 0;
+err_local_vqs:
+ kfree(vs->vqs);
err_vqs:
+ kfree(vs->old_inflight);
+err_inflight:
+ bitmap_free(vs->compl_bitmap);
+err_compl_bitmap:
kvfree(vs);
err_vs:
return r;
@@ -1828,6 +1860,9 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_dev_stop(&vs->dev);
vhost_dev_cleanup(&vs->dev);
kfree(vs->dev.vqs);
+ kfree(vs->vqs);
+ kfree(vs->old_inflight);
+ bitmap_free(vs->compl_bitmap);
kvfree(vs);
return 0;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 23dcbfdfa13b..166044642fd5 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -347,6 +347,14 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
return 0;
}
+static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->suspend;
+}
+
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -470,6 +478,22 @@ static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+/* After a successful return of this ioctl the device must not process more
+ * virtqueue descriptors. The device can answer reads or writes of config
+ * fields as if it were not suspended. In particular, writing to "queue_enable"
+ * with a value of 1 will not make the device start processing buffers.
+ */
+static long vhost_vdpa_suspend(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ops->suspend(vdpa);
+}
+
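A VMM would typically probe the new backend feature before relying on the ioctl; a hedged userspace sketch (fd is an open /dev/vhost-vdpa-N; the constants come from <linux/vhost.h> and <linux/vhost_types.h>):

        uint64_t features;

        if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
                err(1, "VHOST_GET_BACKEND_FEATURES");

        if (features & (1ULL << VHOST_BACKEND_F_SUSPEND)) {
                uint64_t want = 1ULL << VHOST_BACKEND_F_SUSPEND;

                if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &want) < 0)
                        err(1, "VHOST_SET_BACKEND_FEATURES");

                /* later, e.g. when pausing the device for migration: */
                if (ioctl(fd, VHOST_VDPA_SUSPEND) < 0)
                        err(1, "VHOST_VDPA_SUSPEND");
        }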
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -577,7 +601,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if (cmd == VHOST_SET_BACKEND_FEATURES) {
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+ BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
+ !vhost_vdpa_can_suspend(v))
return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
return 0;
@@ -628,6 +656,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
break;
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VDPA_BACKEND_FEATURES;
+ if (vhost_vdpa_can_suspend(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
@@ -640,6 +670,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_GET_VQS_COUNT:
r = vhost_vdpa_get_vqs_count(v, argp);
break;
+ case VHOST_VDPA_SUSPEND:
+ r = vhost_vdpa_suspend(v);
+ break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
@@ -1076,7 +1109,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
if (!bus)
return -EFAULT;
- if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
+ if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
return -ENOTSUPP;
v->domain = iommu_domain_alloc(bus);
@@ -1363,6 +1396,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
err:
put_device(&v->dev);
+ ida_simple_remove(&vhost_vdpa_ida, v->minor);
return r;
}
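The vhost_vdpa changes above add an opt-in suspend path: VHOST_GET_BACKEND_FEATURES advertises VHOST_BACKEND_F_SUSPEND only when the parent driver implements ops->suspend, userspace acks the bit via VHOST_SET_BACKEND_FEATURES, and VHOST_VDPA_SUSPEND then stops descriptor processing. A minimal userspace sketch of that flow follows; it is not part of the patch and assumes the UAPI names this series introduces.

/* Hedged sketch: suspend a vhost-vdpa device from userspace.
 * fd is an open /dev/vhost-vdpa-N; VHOST_BACKEND_F_SUSPEND and
 * VHOST_VDPA_SUSPEND are the UAPI names added by this series. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vdpa_try_suspend(int fd)
{
	uint64_t features;

	if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
		return -1;
	if (!(features & (1ULL << VHOST_BACKEND_F_SUSPEND)))
		return -1;			/* device cannot suspend */

	/* Ack only what we use; unknown bits get -EOPNOTSUPP. */
	features = 1ULL << VHOST_BACKEND_F_SUSPEND;
	if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features) < 0)
		return -1;

	/* After 0 is returned the device processes no more descriptors. */
	return ioctl(fd, VHOST_VDPA_SUSPEND);
}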
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index eab55accf381..11f59dd06a74 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1095,7 +1095,8 @@ EXPORT_SYMBOL(vringh_need_notify_kern);
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
static int iotlb_translate(const struct vringh *vrh,
- u64 addr, u64 len, struct bio_vec iov[],
+ u64 addr, u64 len, u64 *translated,
+ struct bio_vec iov[],
int iov_size, u32 perm)
{
struct vhost_iotlb_map *map;
@@ -1136,43 +1137,76 @@ static int iotlb_translate(const struct vringh *vrh,
spin_unlock(vrh->iotlb_lock);
+ if (translated)
+ *translated = min(len, s);
+
return ret;
}
static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
- len, iov, 16, VHOST_MAP_RO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
- iov_iter_bvec(&iter, READ, iov, ret, len);
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_RO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- ret = copy_from_iter(dst, len, &iter);
+ iov_iter_bvec(&iter, READ, iov, ret, translated);
- return ret;
+ ret = copy_from_iter(dst, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
}
static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
- len, iov, 16, VHOST_MAP_WO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_WO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- iov_iter_bvec(&iter, WRITE, iov, ret, len);
+ iov_iter_bvec(&iter, WRITE, iov, ret, translated);
+
+ ret = copy_to_iter(src, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
- return copy_to_iter(src, len, &iter);
+ return total_translated;
}
static inline int getu16_iotlb(const struct vringh *vrh,
@@ -1183,7 +1217,7 @@ static inline int getu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic read is needed for getu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_RO);
if (ret < 0)
return ret;
@@ -1204,7 +1238,7 @@ static inline int putu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic write is needed for putu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_WO);
if (ret < 0)
return ret;
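The copy_from_iotlb()/copy_to_iotlb() rework above stops assuming a whole transfer fits the 16-entry bio_vec array: iotlb_translate() now reports how many bytes it actually translated, -ENOBUFS (iov exhausted) is treated as partial progress, and the callers loop, advancing src/dst until the full length is copied. A compilable toy sketch of the same chunked-copy pattern, with an identity "translation" standing in for the IOTLB lookup:

#include <stddef.h>
#include <string.h>

#define CHUNK 16	/* deliberately small, to force several iterations */

/* toy stand-in for iotlb_translate(): "maps" at most CHUNK bytes and
 * reports how much it mapped; the real code walks the vhost IOTLB */
static size_t translate(const void *src, size_t len, const void **mapped)
{
	*mapped = src;				/* identity mapping */
	return len < CHUNK ? len : CHUNK;
}

static size_t chunked_copy(void *dst, const void *src, size_t len)
{
	char *d = dst;
	const char *s = src;
	size_t done = 0;

	while (done < len) {
		const void *m;
		size_t n = translate(s + done, len - done, &m);

		if (!n)
			break;			/* translation failed part-way */
		memcpy(d + done, m, n);
		done += n;			/* advance and retranslate */
	}
	return done;
}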
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 2b9e2bbbb03e..fc02c5c16055 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -218,9 +218,8 @@ err:
static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
{
- unsigned int period = lp->pdata->period_ns;
- unsigned int duty = br * period / max_br;
struct pwm_device *pwm;
+ struct pwm_state state;
/* request pwm device with the consumer name */
if (!lp->pwm) {
@@ -230,18 +229,16 @@ static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
lp->pwm = pwm;
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pwm);
+ pwm_init_state(lp->pwm, &state);
+ } else {
+ pwm_get_state(lp->pwm, &state);
}
- pwm_config(lp->pwm, duty, period);
- if (duty)
- pwm_enable(lp->pwm);
- else
- pwm_disable(lp->pwm);
+ state.period = lp->pdata->period_ns;
+ state.duty_cycle = div_u64(br * state.period, max_br);
+ state.enabled = state.duty_cycle;
+
+ pwm_apply_state(lp->pwm, &state);
}
static int lp855x_bl_update_status(struct backlight_device *bl)
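The lp855x conversion above is the standard migration from the legacy pwm_config()/pwm_enable()/pwm_disable() calls to the atomic PWM API: fill a complete pwm_state (period, duty_cycle, enabled) and commit it with a single pwm_apply_state() call, so the hardware never sees a half-updated configuration. A hedged, generic sketch of that shape (set_brightness() is a made-up name, not the driver's):

#include <linux/pwm.h>
#include <linux/math64.h>

static int set_brightness(struct pwm_device *pwm, unsigned int period_ns,
			  unsigned int br, unsigned int max_br)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from what the hardware runs */
	state.period = period_ns;
	state.duty_cycle = div_u64((u64)br * period_ns, max_br);
	state.enabled = state.duty_cycle;	/* zero duty means "off" */

	return pwm_apply_state(pwm, &state);
}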
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index b2bfbf070200..dc37494baf42 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -12,7 +12,6 @@
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/lcd.h>
-#include <linux/of.h>
#include <linux/slab.h>
#include <video/platform_lcd.h>
@@ -133,19 +132,10 @@ static int platform_lcd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(platform_lcd_pm_ops, platform_lcd_suspend,
platform_lcd_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id platform_lcd_of_match[] = {
- { .compatible = "platform-lcd" },
- {},
-};
-MODULE_DEVICE_TABLE(of, platform_lcd_of_match);
-#endif
-
static struct platform_driver platform_lcd_driver = {
.driver = {
.name = "platform-lcd",
.pm = &platform_lcd_pm_ops,
- .of_match_table = of_match_ptr(platform_lcd_of_match),
},
.probe = platform_lcd_probe,
};
diff --git a/drivers/video/backlight/rt4831-backlight.c b/drivers/video/backlight/rt4831-backlight.c
index 42155c7d2db1..eb8c59e8713f 100644
--- a/drivers/video/backlight/rt4831-backlight.c
+++ b/drivers/video/backlight/rt4831-backlight.c
@@ -12,6 +12,7 @@
#define RT4831_REG_BLCFG 0x02
#define RT4831_REG_BLDIML 0x04
#define RT4831_REG_ENABLE 0x08
+#define RT4831_REG_BLOPT2 0x11
#define RT4831_BLMAX_BRIGHTNESS 2048
@@ -23,6 +24,11 @@
#define RT4831_BLDIML_MASK GENMASK(2, 0)
#define RT4831_BLDIMH_MASK GENMASK(10, 3)
#define RT4831_BLDIMH_SHIFT 3
+#define RT4831_BLOCP_MASK GENMASK(1, 0)
+
+#define RT4831_BLOCP_MINUA 900000
+#define RT4831_BLOCP_MAXUA 1800000
+#define RT4831_BLOCP_STEPUA 300000
struct rt4831_priv {
struct device *dev;
@@ -85,7 +91,7 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
{
struct device *dev = priv->dev;
u8 propval;
- u32 brightness;
+ u32 brightness, ocp_uA;
unsigned int val = 0;
int ret;
@@ -120,6 +126,31 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
if (ret)
return ret;
+	/*
+	 * This OCP level is used to protect and limit the inductor current.
+	 * If the inductor peak current reaches this level, the low-side
+	 * MOSFET is turned off and the output channel current may be
+	 * limited. To match the configured channel current, the inductor
+	 * chosen must be rated above the OCP level.
+	 *
+	 * Unlike the OVP level, the default 21V suits most applications.
+	 * But if the chosen OCP level is smaller than needed, the backlight
+	 * channel output current will also end up smaller than the register
+	 * setting.
+	 */
+ ret = device_property_read_u32(dev, "richtek,bled-ocp-microamp",
+ &ocp_uA);
+ if (!ret) {
+ ocp_uA = clamp_val(ocp_uA, RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_MAXUA);
+ val = DIV_ROUND_UP(ocp_uA - RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_STEPUA);
+ ret = regmap_update_bits(priv->regmap, RT4831_REG_BLOPT2,
+ RT4831_BLOCP_MASK, val);
+ if (ret)
+ return ret;
+ }
+
ret = device_property_read_u8(dev, "richtek,channel-use", &propval);
if (ret) {
dev_err(dev, "richtek,channel-use DT property missing\n");
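As a worked check of the clamp-and-quantize logic above: with MINUA = 900000, MAXUA = 1800000 and STEPUA = 300000, the two-bit BLOCP field can only take the values 0..3. A small standalone program (not kernel code) reproducing the arithmetic:

#include <stdio.h>

#define MINUA  900000u
#define MAXUA 1800000u
#define STEP   300000u

int main(void)
{
	unsigned int samples[] = { 500000, 900000, 1000000, 1500000, 2000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int ua = samples[i];

		if (ua < MINUA)
			ua = MINUA;	/* clamp_val() lower bound */
		if (ua > MAXUA)
			ua = MAXUA;	/* clamp_val() upper bound */
		/* DIV_ROUND_UP(ua - MINUA, STEP) */
		printf("%u uA -> field %u\n", samples[i],
		       (ua - MINUA + STEP - 1) / STEP);
	}
	return 0;	/* prints fields 0, 0, 1, 2, 3 */
}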
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index bd4dc97d4d34..db568f67e4dc 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -290,7 +290,7 @@ static char default_sti_path[21] __read_mostly;
static int __init sti_setup(char *str)
{
if (str)
- strlcpy (default_sti_path, str, sizeof (default_sti_path));
+ strscpy(default_sti_path, str, sizeof(default_sti_path));
return 1;
}
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 576612f18d59..fcdf017e2665 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -75,7 +75,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
-static struct uni_pagedir *vgacon_uni_pagedir;
+static struct uni_pagedict *vgacon_uni_pagedir;
static int vgacon_refcount;
/* Description of the hardware situation */
@@ -342,7 +342,7 @@ static const char *vgacon_startup(void)
static void vgacon_init(struct vc_data *c, int init)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
/*
* We cannot be loaded as a module, therefore init will be 1
@@ -367,10 +367,10 @@ static void vgacon_init(struct vc_data *c, int init)
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
- p = *c->vc_uni_pagedir_loc;
- if (c->vc_uni_pagedir_loc != &vgacon_uni_pagedir) {
+ p = *c->uni_pagedict_loc;
+ if (c->uni_pagedict_loc != &vgacon_uni_pagedir) {
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &vgacon_uni_pagedir;
+ c->uni_pagedict_loc = &vgacon_uni_pagedir;
vgacon_refcount++;
}
if (!vgacon_uni_pagedir && p)
@@ -392,7 +392,7 @@ static void vgacon_deinit(struct vc_data *c)
if (!--vgacon_refcount)
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &c->vc_uni_pagedir;
+ c->uni_pagedict_loc = &c->uni_pagedict;
con_set_default_unimap(c);
}
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 9811f1bad8d4..7db03ed77c76 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -84,9 +84,6 @@ static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = {
/*
* Interface used by the world
*/
-int mc68x328fb_init(void);
-int mc68x328fb_setup(char *);
-
static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int mc68x328fb_set_par(struct fb_info *info);
@@ -403,7 +400,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
#endif
}
-int __init mc68x328fb_setup(char *options)
+static int __init mc68x328fb_setup(char *options)
{
if (!options || !*options)
return 1;
@@ -414,7 +411,7 @@ int __init mc68x328fb_setup(char *options)
* Initialisation
*/
-int __init mc68x328fb_init(void)
+static int __init mc68x328fb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 8080116aea84..f65c96d1394d 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -698,16 +698,18 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
return -ENODEV;
panel = of_graph_get_remote_port_parent(endpoint);
- if (!panel)
- return -ENODEV;
+ if (!panel) {
+ err = -ENODEV;
+ goto out_endpoint_put;
+ }
err = clcdfb_of_get_backlight(&fb->dev->dev, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
&max_bandwidth);
@@ -736,11 +738,21 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (of_property_read_u32_array(endpoint,
"arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
- return -ENOENT;
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) {
+ err = -ENOENT;
+ goto out_panel_put;
+ }
+
+ of_node_put(panel);
+ of_node_put(endpoint);
return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
tft_r0b0g0[1], tft_r0b0g0[2]);
+out_panel_put:
+ of_node_put(panel);
+out_endpoint_put:
+ of_node_put(endpoint);
+ return err;
}
static int clcdfb_of_vram_setup(struct clcd_fb *fb)
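The amba-clcd fix above is the usual of_node_put() unwind: of_graph_get_remote_port_parent() (and the earlier endpoint lookup) return nodes with an elevated refcount, so every exit path, success included, must drop exactly the references it took, in reverse order. A reduced sketch of that shape; init_display() and do_setup() are made-up names, the of_* calls are the real <linux/of_graph.h> API:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_graph.h>

static int do_setup(struct device_node *panel)
{
	return panel ? 0 : -EINVAL;	/* placeholder for the real work */
}

static int init_display(struct device_node *np)
{
	struct device_node *endpoint, *panel;
	int err;

	endpoint = of_graph_get_next_endpoint(np, NULL);
	if (!endpoint)
		return -ENODEV;

	panel = of_graph_get_remote_port_parent(endpoint);
	if (!panel) {
		err = -ENODEV;
		goto out_endpoint_put;
	}

	err = do_setup(panel);		/* may fail after both gets */
	if (err)
		goto out_panel_put;

	/* success drops its references too, before returning 0 */
out_panel_put:
	of_node_put(panel);
out_endpoint_put:
	of_node_put(endpoint);
	return err;
}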
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 6e07a97bbd31..d88265dbebf4 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -2540,27 +2540,16 @@ static int amifb_blank(int blank, struct fb_info *info)
static int amifb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual || var->xoffset)
- return -EINVAL;
- } else {
+ if (!(var->vmode & FB_VMODE_YWRAP)) {
/*
* TODO: There will be problems when xpan!=1, so some columns
* on the right side will never be seen
*/
if (var->xoffset + info->var.xres >
- upx(16 << maxfmode, info->var.xres_virtual) ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
+ upx(16 << maxfmode, info->var.xres_virtual))
return -EINVAL;
}
ami_pan_var(var, info);
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index eb3e47c58c5f..a2a381631628 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -781,7 +781,12 @@ static int arkfb_set_par(struct fb_info *info)
return -EINVAL;
}
- ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
+ value = (hdiv * info->var.pixclock) / hmul;
+ if (!value) {
+ fb_dbg(info, "invalid pixclock\n");
+ value = 1;
+ }
+ ark_set_pixclock(info, value);
svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
@@ -792,6 +797,8 @@ static int arkfb_set_par(struct fb_info *info)
value = ((value * hmul / hdiv) / 8) - 5;
vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 52a35b661643..2bc4089865e6 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -236,8 +236,6 @@ static int *MV300_reg = MV300_reg_8bit;
#endif /* ATAFB_EXT */
-static int inverse;
-
/*
* struct fb_ops {
* * open/release and usage marking
@@ -467,27 +465,27 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 320x200, 15 kHz, 60 Hz (ST low) */
"st-low", 60, 320, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x200, 15 kHz, 60 Hz (ST medium) */
"st-mid", 60, 640, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 30.25 kHz, 63.5 Hz (ST high) */
"st-high", 63, 640, 400, 32000, 128, 0, 40, 14, 128, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 320x480, 15 kHz, 60 Hz (TT low) */
"tt-low", 60, 320, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x480, 29 kHz, 57 Hz (TT medium) */
"tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 1280x960, 72 kHz, 72 Hz (TT high) */
- "tt-high", 57, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "tt-high", 72, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
+ 0, FB_VMODE_NONINTERLACED
},
/*
@@ -496,12 +494,12 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 640x480, 31 kHz, 60 Hz (VGA) */
- "vga", 63.5, 640, 480, 32000, 18, 42, 31, 11, 96, 3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga", 60, 640, 480, 39721, 42, 18, 31, 11, 100, 3,
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 31 kHz, 70 Hz (VGA) */
- "vga70", 70, 640, 400, 32000, 18, 42, 31, 11, 96, 3,
- FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga70", 70, 640, 400, 39721, 42, 18, 31, 11, 100, 3,
+ FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED
},
/*
@@ -511,7 +509,7 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 896x608, 31 kHz, 60 Hz (Falcon High) */
"falh", 60, 896, 608, 32000, 18, 42, 31, 1, 96,3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
},
};
@@ -1010,10 +1008,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
else if (yres_virtual < yres)
yres_virtual = yres;
- /* backward bug-compatibility */
- if (var->pixclock > 1)
- var->pixclock -= 1;
-
par->hw.falcon.line_width = bpp * xres / 16;
par->hw.falcon.line_offset = bpp * (xres_virtual - xres) / 16;
@@ -1072,8 +1066,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
xstretch = 2; /* Double pixel width only for hicolor */
/* Default values are used for vert./hor. timing if no pixelclock given. */
if (var->pixclock == 0) {
- int linesize;
-
/* Choose master pixelclock depending on hor. timing */
plen = 1 * xstretch;
if ((plen * xres + f25.right + f25.hsync + f25.left) *
@@ -1092,7 +1084,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
left_margin = pclock->left / plen;
right_margin = pclock->right / plen;
hsync_len = pclock->hsync / plen;
- linesize = left_margin + xres + right_margin + hsync_len;
upper_margin = 31;
lower_margin = 11;
vsync_len = 3;
@@ -1641,7 +1632,7 @@ static irqreturn_t falcon_vbl_switcher(int irq, void *dummy)
static int falcon_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int xoffset;
int bpp = info->var.bits_per_pixel;
@@ -2208,6 +2199,10 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
if (regno > 255)
return 1;
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
switch (external_card_type) {
case IS_VGA:
OUTB(0x3c8, regno);
@@ -2261,7 +2256,7 @@ static void set_screen_base(void *s_base)
static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
if (!fbhw->set_screen_base ||
(!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset))
@@ -2407,55 +2402,19 @@ static void atafb_set_disp(struct fb_info *info)
static int
atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- int xoffset = var->xoffset;
- int yoffset = var->yoffset;
- int err;
-
- if (var->vmode & FB_VMODE_YWRAP) {
- if (yoffset < 0 || yoffset >= info->var.yres_virtual || xoffset)
- return -EINVAL;
- } else {
- if (xoffset + info->var.xres > info->var.xres_virtual ||
- yoffset + info->var.yres > info->var.yres_virtual)
- return -EINVAL;
- }
-
- if (fbhw->pan_display) {
- err = fbhw->pan_display(var, info);
- if (err)
- return err;
- } else
+ if (!fbhw->pan_display)
return -EINVAL;
- info->var.xoffset = xoffset;
- info->var.yoffset = yoffset;
-
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
-
- return 0;
+ return fbhw->pan_display(var, info);
}
/*
* generic drawing routines; imageblit needs updating for image depth > 1
*/
-#if BITS_PER_LONG == 32
-#define BYTES_PER_LONG 4
-#define SHIFT_PER_LONG 5
-#elif BITS_PER_LONG == 64
-#define BYTES_PER_LONG 8
-#define SHIFT_PER_LONG 6
-#else
-#define Please update me
-#endif
-
-
static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 width, height;
@@ -2498,7 +2457,7 @@ static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 dx, dy, sx, sy, width, height;
int rev_copy = 0;
@@ -2552,10 +2511,8 @@ static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
- unsigned long *dst;
- int dst_idx;
const char *src;
u32 dx, dy, width, height, pitch;
@@ -2582,10 +2539,6 @@ static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
if (image->depth == 1) {
// used for font data
- dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
- dst_idx += dy * par->next_line * 8 + dx;
src = image->data;
pitch = (image->width + 7) / 8;
while (height--) {
@@ -2622,14 +2575,14 @@ atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
switch (cmd) {
#ifdef FBCMD_GET_CURRENTPAR
case FBCMD_GET_CURRENTPAR:
- if (copy_to_user((void *)arg, (void *)&current_par,
+ if (copy_to_user((void *)arg, &current_par,
sizeof(struct atafb_par)))
return -EFAULT;
return 0;
#endif
#ifdef FBCMD_SET_CURRENTPAR
case FBCMD_SET_CURRENTPAR:
- if (copy_from_user((void *)&current_par, (void *)arg,
+ if (copy_from_user(&current_par, (void *)arg,
sizeof(struct atafb_par)))
return -EFAULT;
ata_set_par(&current_par);
@@ -2695,7 +2648,7 @@ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
* hw par just decoded */
static int atafb_set_par(struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
@@ -2981,7 +2934,7 @@ static void __init atafb_setup_user(char *spec)
}
}
-int __init atafb_setup(char *options)
+static int __init atafb_setup(char *options)
{
char *this_opt;
int temp;
@@ -2996,7 +2949,7 @@ int __init atafb_setup(char *options)
default_par = temp;
mode_option = this_opt;
} else if (!strcmp(this_opt, "inverse"))
- inverse = 1;
+ fb_invert_cmaps();
else if (!strncmp(this_opt, "hwscroll_", 9)) {
hwscroll = simple_strtoul(this_opt + 9, NULL, 10);
if (hwscroll < 0)
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a3e6faed7745..14eb718bd67c 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3891,7 +3891,7 @@ static int __init atyfb_setup(char *options)
&& (!strncmp(this_opt, "Mach64:", 7))) {
static unsigned char m64_num;
static char mach64_str[80];
- strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
+ strscpy(mach64_str, this_opt + 7, sizeof(mach64_str));
if (!store_video_par(mach64_str, m64_num)) {
m64_num++;
mach64_count = m64_num;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 6851f47613e1..a14a8d73035c 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1980,7 +1980,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
info->screen_base = rinfo->fb_base;
info->screen_size = rinfo->mapped_vram;
/* Fill fix common fields */
- strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
+ strscpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
info->fix.smem_start = rinfo->fb_base_phys;
info->fix.smem_len = rinfo->video_ram;
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -2094,34 +2094,34 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
u32 tmp;
/* framebuffer size */
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
(rinfo->family == CHIP_FAMILY_RS200) ||
(rinfo->family == CHIP_FAMILY_RS300) ||
(rinfo->family == CHIP_FAMILY_RC410) ||
(rinfo->family == CHIP_FAMILY_RS400) ||
(rinfo->family == CHIP_FAMILY_RS480) ) {
- u32 tom = INREG(NB_TOM);
- tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
-
- radeon_fifo_wait(6);
- OUTREG(MC_FB_LOCATION, tom);
- OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
-
- /* This is supposed to fix the crtc2 noise problem. */
- OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
-
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
- (rinfo->family == CHIP_FAMILY_RS200)) {
- /* This is to workaround the asic bug for RMX, some versions
- of BIOS doesn't have this register initialized correctly.
- */
- OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
- ~CRTC_H_CUTOFF_ACTIVE_EN);
- }
- } else {
- tmp = INREG(CNFG_MEMSIZE);
+ u32 tom = INREG(NB_TOM);
+
+ tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+ radeon_fifo_wait(6);
+ OUTREG(MC_FB_LOCATION, tom);
+ OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
+
+ /* This is supposed to fix the crtc2 noise problem. */
+ OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
+
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ (rinfo->family == CHIP_FAMILY_RS200)) {
+			/* This works around the ASIC bug for RMX; some BIOS
+			 * versions don't initialize this register correctly.
+			 */
+ OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
+ ~CRTC_H_CUTOFF_ACTIVE_EN);
+ }
+ } else {
+ tmp = INREG(CNFG_MEMSIZE);
}
/* mem size is bits [28:0], mask off the rest */
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index e7702fe1fe7d..6403ae07970d 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -182,7 +182,7 @@ static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void bw2_init_fix(struct fb_info *info, int linebytes)
{
- strlcpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
+ strscpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_MONO01;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 393894af26f8..2b00a9d554fc 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 51e072c03e1c..2a9fa06881b5 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1999,7 +1999,7 @@ static int cirrusfb_set_fbinfo(struct fb_info *info)
}
/* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ strscpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
sizeof(info->fix.id));
/* monochrome: only 1 memory plane */
@@ -2301,7 +2301,7 @@ err_release_fb:
return error;
}
-void cirrusfb_zorro_unregister(struct zorro_dev *z)
+static void cirrusfb_zorro_unregister(struct zorro_dev *z)
{
struct fb_info *info = zorro_get_drvdata(z);
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 771ce1f76951..a1061c2f1640 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -326,7 +326,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
info->var.vmode = FB_VMODE_NONINTERLACED;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.accel = FB_ACCEL_NONE;
- strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
fb_videomode_to_var(&info->var, &cfb->mode);
ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index f114242b5a70..098b62f7b701 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -412,7 +412,7 @@ static int __init fb_console_setup(char *this_opt)
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5)) {
- strlcpy(fontname, options + 5, sizeof(fontname));
+ strscpy(fontname, options + 5, sizeof(fontname));
continue;
}
@@ -1060,9 +1060,9 @@ static void fbcon_init(struct vc_data *vc, int init)
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
ops = info->fbcon_par;
@@ -1384,9 +1384,9 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -2401,15 +2401,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
- int resize;
+ int resize, ret, old_userfont, old_width, old_height, old_charcount;
char *old_data = NULL;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
+ old_userfont = p->userfont;
if ((p->userfont = userfont))
REFCOUNT(data)++;
+
+ old_width = vc->vc_font.width;
+ old_height = vc->vc_font.height;
+ old_charcount = vc->vc_font.charcount;
+
vc->vc_font.width = w;
vc->vc_font.height = h;
vc->vc_font.charcount = charcount;
@@ -2425,7 +2431,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
cols /= w;
rows /= h;
- vc_resize(vc, cols, rows);
+ ret = vc_resize(vc, cols, rows);
+ if (ret)
+ goto err_out;
} else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
@@ -2435,6 +2443,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
if (old_data && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
+
+err_out:
+ p->fontdata = old_data;
+ vc->vc_font.data = (void *)old_data;
+
+ if (userfont) {
+ p->userfont = old_userfont;
+ REFCOUNT(data)--;
+ }
+
+ vc->vc_font.width = old_width;
+ vc->vc_font.height = old_height;
+ vc->vc_font.charcount = old_charcount;
+
+ return ret;
}
/*
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index c2a60b187467..4d7f63892dcc 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
if (WARN_ON(refcount_read(&info->count)))
return;
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+ mutex_destroy(&info->bl_curve_mutex);
+#endif
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index d45355b9a58c..8f041f9b14c7 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1134,7 +1134,7 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx)
info->fb_size = int_cfb_info->fb.fix.smem_len;
info->info = int_cfb_info;
- strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
+ strscpy(info->dev_name, int_cfb_info->fb.fix.id,
sizeof(info->dev_name));
}
@@ -1229,7 +1229,7 @@ static int cyber2000fb_ddc_getsda(void *data)
static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
- strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
cfb->ddc_adapter.owner = THIS_MODULE;
cfb->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1304,7 +1304,7 @@ static int cyber2000fb_i2c_getscl(void *data)
static int cyber2000fb_i2c_register(struct cfb_info *cfb)
{
- strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
sizeof(cfb->i2c_adapter.name));
cfb->i2c_adapter.owner = THIS_MODULE;
cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
@@ -1500,7 +1500,7 @@ static int cyber2000fb_setup(char *options)
if (strncmp(opt, "font:", 5) == 0) {
static char default_font_storage[40];
- strlcpy(default_font_storage, opt + 5,
+ strscpy(default_font_storage, opt + 5,
sizeof(default_font_storage));
default_font = default_font_storage;
continue;
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 3688f9165848..18405c402ec1 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -280,7 +280,7 @@ static struct platform_device dnfb_device = {
.name = "dnfb",
};
-int __init dnfb_init(void)
+static int __init dnfb_init(void)
{
int ret;
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index b3d580e57221..7cba3969a970 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -883,7 +883,7 @@ static void ffb_init_fix(struct fb_info *info)
} else
ffb_type_name = "Elite 3D";
- strlcpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index 3b727d528fde..942e382cf1cf 100644
--- a/drivers/video/fbdev/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
@@ -293,7 +293,7 @@ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
return 0;
}
-int __init fm2fb_setup(char *options)
+static int __init fm2fb_setup(char *options)
{
char *this_opt;
@@ -309,7 +309,7 @@ int __init fm2fb_setup(char *options)
return 0;
}
-int __init fm2fb_init(void)
+static int __init fm2fb_init(void)
{
char *option = NULL;
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 5d34d89fb665..e41204ecb0e3 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -410,13 +410,13 @@ static void __init gx1fb_setup(char *options)
continue;
if (!strncmp(this_opt, "mode:", 5))
- strlcpy(mode_option, this_opt + 5, sizeof(mode_option));
+ strscpy(mode_option, this_opt + 5, sizeof(mode_option));
else if (!strncmp(this_opt, "crt:", 4))
crt_option = !!simple_strtoul(this_opt + 4, NULL, 0);
else if (!strncmp(this_opt, "panel:", 6))
- strlcpy(panel_option, this_opt + 6, sizeof(panel_option));
+ strscpy(panel_option, this_opt + 6, sizeof(panel_option));
else
- strlcpy(mode_option, this_opt, sizeof(mode_option));
+ strscpy(mode_option, this_opt, sizeof(mode_option));
}
}
#endif
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index e5475ae1e158..94588b809ebf 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -650,7 +650,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cardtype = ent->driver_data;
par->refclk_ps = cardinfo[cardtype].refclk_ps;
info->fix = gxt4500_fix;
- strlcpy(info->fix.id, cardinfo[cardtype].cardname,
+ strscpy(info->fix.id, cardinfo[cardtype].cardname,
sizeof(info->fix.id));
info->pseudo_palette = par->pseudo_palette;
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index 8d418abdd767..cdd44e5deafe 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -375,7 +375,7 @@ static struct dio_driver hpfb_driver = {
.remove = hpfb_remove_one,
};
-int __init hpfb_init(void)
+static int __init hpfb_init(void)
{
unsigned int sid;
unsigned char i;
@@ -415,7 +415,7 @@ int __init hpfb_init(void)
return 0;
}
-void __exit hpfb_cleanup_module(void)
+static void __exit hpfb_cleanup_module(void)
{
dio_unregister_driver(&hpfb_driver);
}
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 09dd85553d4f..bd30d8314b68 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -159,7 +159,7 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
{
struct i740fb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -400,7 +400,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
u32 xres, right, hslen, left, xtotal;
u32 yres, lower, vslen, upper, ytotal;
u32 vxres, xoffset, vyres, yoffset;
- u32 bpp, base, dacspeed24, mem;
+ u32 bpp, base, dacspeed24, mem, freq;
u8 r7;
int i;
@@ -643,7 +643,12 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
par->atc[VGA_ATC_OVERSCAN] = 0;
/* Calculate VCLK that most closely matches the requested dot clock */
- i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
+ freq = (((u32)1e9) / var->pixclock) * (u32)(1e3);
+ if (freq < I740_RFREQ_FIX) {
+ fb_dbg(info, "invalid pixclock\n");
+ freq = I740_RFREQ_FIX;
+ }
+ i740_calc_vclk(freq, par);
/* Since we program the clocks ourselves, always use VCLK2. */
par->misc |= 0x0C;
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index a2f644c97f28..94f3bc637fc8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -41,7 +41,18 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
-#include <linux/platform_data/video-imxfb.h>
+#define PCR_TFT (1 << 31)
+#define PCR_BPIX_8 (3 << 25)
+#define PCR_BPIX_12 (4 << 25)
+#define PCR_BPIX_16 (5 << 25)
+#define PCR_BPIX_18 (6 << 25)
+
+struct imx_fb_videomode {
+ struct fb_videomode mode;
+ u32 pcr;
+ bool aus_mode;
+ unsigned char bpp;
+};
/*
* Complain if VAR is out of range.
@@ -656,7 +667,6 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
struct device_node *np;
@@ -671,7 +681,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
fbi->devtype = pdev->id_entry->driver_data;
- strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
@@ -690,25 +700,20 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
info->fbops = &imxfb_ops;
info->flags = FBINFO_FLAG_DEFAULT |
FBINFO_READS_FAST;
- if (pdata) {
- fbi->lscr1 = pdata->lscr1;
- fbi->dmacr = pdata->dmacr;
- fbi->pwmr = pdata->pwmr;
- } else {
- np = pdev->dev.of_node;
- info->var.grayscale = of_property_read_bool(np,
- "cmap-greyscale");
- fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
- fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
+ np = pdev->dev.of_node;
+ info->var.grayscale = of_property_read_bool(np,
+ "cmap-greyscale");
+ fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
+ fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
+ fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
- of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+ of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
- of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
- }
+ of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+
+ of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
return 0;
}
@@ -863,10 +868,10 @@ static int imxfb_probe(struct platform_device *pdev)
struct imxfb_info *fbi;
struct lcd_device *lcd;
struct fb_info *info;
- struct imx_fb_platform_data *pdata;
struct resource *res;
struct imx_fb_videomode *m;
const struct of_device_id *of_id;
+ struct device_node *display_np;
int ret, i;
int bytes_per_pixel;
@@ -884,8 +889,6 @@ static int imxfb_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- pdata = dev_get_platdata(&pdev->dev);
-
info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
if (!info)
return -ENOMEM;
@@ -898,43 +901,34 @@ static int imxfb_probe(struct platform_device *pdev)
if (ret < 0)
goto failed_init;
- if (pdata) {
- if (!fb_mode)
- fb_mode = pdata->mode[0].mode.name;
-
- fbi->mode = pdata->mode;
- fbi->num_modes = pdata->num_modes;
- } else {
- struct device_node *display_np;
- fb_mode = NULL;
-
- display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
- if (!display_np) {
- dev_err(&pdev->dev, "No display defined in devicetree\n");
- ret = -EINVAL;
- goto failed_of_parse;
- }
+ fb_mode = NULL;
- /*
- * imxfb does not support more modes, we choose only the native
- * mode.
- */
- fbi->num_modes = 1;
-
- fbi->mode = devm_kzalloc(&pdev->dev,
- sizeof(struct imx_fb_videomode), GFP_KERNEL);
- if (!fbi->mode) {
- ret = -ENOMEM;
- of_node_put(display_np);
- goto failed_of_parse;
- }
+ display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
+ if (!display_np) {
+ dev_err(&pdev->dev, "No display defined in devicetree\n");
+ ret = -EINVAL;
+ goto failed_of_parse;
+ }
- ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+	/*
+	 * imxfb does not support more than one mode; we choose only the
+	 * native mode.
+	 */
+ fbi->num_modes = 1;
+
+ fbi->mode = devm_kzalloc(&pdev->dev,
+ sizeof(struct imx_fb_videomode), GFP_KERNEL);
+ if (!fbi->mode) {
+ ret = -ENOMEM;
of_node_put(display_np);
- if (ret)
- goto failed_of_parse;
+ goto failed_of_parse;
}
+ ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ of_node_put(display_np);
+ if (ret)
+ goto failed_of_parse;
+
/* Calculate maximum bytes used per pixel. In most cases this should
* be the same as m->bpp/8 */
m = &fbi->mode[0];
@@ -943,13 +937,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_len = max_t(size_t, info->fix.smem_len,
m->mode.xres * m->mode.yres * bytes_per_pixel);
- res = request_mem_region(res->start, resource_size(res),
- DRIVER_NAME);
- if (!res) {
- ret = -EBUSY;
- goto failed_req;
- }
-
fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fbi->clk_ipg)) {
ret = PTR_ERR(fbi->clk_ipg);
@@ -983,10 +970,10 @@ static int imxfb_probe(struct platform_device *pdev)
goto failed_getclock;
}
- fbi->regs = ioremap(res->start, resource_size(res));
- if (fbi->regs == NULL) {
+ fbi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbi->regs)) {
dev_err(&pdev->dev, "Cannot map frame buffer registers\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi->regs);
goto failed_ioremap;
}
@@ -1001,13 +988,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_start = fbi->map_dma;
- if (pdata && pdata->init) {
- ret = pdata->init(fbi->pdev);
- if (ret)
- goto failed_platform_init;
- }
-
-
INIT_LIST_HEAD(&info->modelist);
for (i = 0; i < fbi->num_modes; i++)
fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
@@ -1059,17 +1039,12 @@ failed_lcd:
failed_register:
fb_dealloc_cmap(&info->cmap);
failed_cmap:
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
-failed_platform_init:
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
failed_map:
- iounmap(fbi->regs);
failed_ioremap:
failed_getclock:
release_mem_region(res->start, resource_size(res));
-failed_req:
failed_of_parse:
kfree(info->pseudo_palette);
failed_init:
@@ -1079,26 +1054,15 @@ failed_init:
static int imxfb_remove(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata;
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
imxfb_disable_controller(fbi);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
- iounmap(fbi->regs);
- release_mem_region(res->start, resource_size(res));
kfree(info->pseudo_palette);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 236521b19daf..68bba2688f4c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2383,9 +2383,9 @@ static int __init matroxfb_setup(char *options) {
else if (!strncmp(this_opt, "mem:", 4))
mem = simple_strtoul(this_opt+4, NULL, 0);
else if (!strncmp(this_opt, "mode:", 5))
- strlcpy(videomode, this_opt+5, sizeof(videomode));
+ strscpy(videomode, this_opt + 5, sizeof(videomode));
else if (!strncmp(this_opt, "outputs:", 8))
- strlcpy(outputs, this_opt+8, sizeof(outputs));
+ strscpy(outputs, this_opt + 8, sizeof(outputs));
else if (!strncmp(this_opt, "dfp:", 4)) {
dfp_type = simple_strtoul(this_opt+4, NULL, 0);
dfp = 1;
@@ -2455,7 +2455,7 @@ static int __init matroxfb_setup(char *options) {
else if (!strcmp(this_opt, "dfp"))
dfp = value;
else {
- strlcpy(videomode, this_opt, sizeof(videomode));
+ strscpy(videomode, this_opt, sizeof(videomode));
}
}
}
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index b1acb1ebebe9..91001990e351 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#ifdef CONFIG_PPC32
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index 9d9fe5c3a7a1..161fc65d6b57 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -489,7 +489,7 @@ static void hwa742_update_window_auto(struct timer_list *unused)
__hwa742_update_window_auto(false);
}
-int hwa742_update_window_async(struct fb_info *fbi,
+static int hwa742_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*complete_callback)(void *arg),
void *complete_callback_data)
@@ -522,7 +522,6 @@ int hwa742_update_window_async(struct fb_info *fbi,
out:
return r;
}
-EXPORT_SYMBOL(hwa742_update_window_async);
static int hwa742_setup_plane(int plane, int channel_out,
unsigned long offset, int screen_width,
diff --git a/drivers/video/fbdev/omap/omapfb.h b/drivers/video/fbdev/omap/omapfb.h
index beb841ccb99c..ab1cb6e7f5f8 100644
--- a/drivers/video/fbdev/omap/omapfb.h
+++ b/drivers/video/fbdev/omap/omapfb.h
@@ -227,13 +227,4 @@ extern int omapfb_register_client(struct omapfb_notifier_block *nb,
omapfb_notifier_callback_t callback,
void *callback_data);
extern int omapfb_unregister_client(struct omapfb_notifier_block *nb);
-extern int omapfb_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-extern int hwa742_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-
#endif /* __OMAPFB_H */
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 292fcb0a24fc..17cda5765683 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -668,7 +668,7 @@ static int omapfb_set_par(struct fb_info *fbi)
return r;
}
-int omapfb_update_window_async(struct fb_info *fbi,
+static int omapfb_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*callback)(void *),
void *callback_data)
@@ -714,7 +714,6 @@ int omapfb_update_window_async(struct fb_info *fbi,
return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
}
-EXPORT_SYMBOL(omapfb_update_window_async);
static int omapfb_update_win(struct fb_info *fbi,
struct omapfb_update_window *win)
@@ -1643,15 +1642,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
fbdev->int_irq = platform_get_irq(pdev, 0);
- if (!fbdev->int_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->int_irq < 0) {
r = ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
- if (!fbdev->ext_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->ext_irq < 0) {
r = ENXIO;
goto cleanup;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index afa688e754b9..5ccddcfce722 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1331,7 +1331,7 @@ static void clear_fb_info(struct fb_info *fbi)
{
memset(&fbi->var, 0, sizeof(fbi->var));
memset(&fbi->fix, 0, sizeof(fbi->fix));
- strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+ strscpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
}
static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index d3be2c64f1c0..8fd79deb1e2a 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -617,6 +617,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
}
+ if (!var->pixclock) {
+ DPRINTK("pixclock is zero\n");
+ return -EINVAL;
+ }
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index e943300d23e8..d5d0bbd39213 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -640,7 +640,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
info->node = -1;
- strlcpy(info->fix.id, mi->id, 16);
+ strscpy(info->fix.id, mi->id, 16);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
info->fix.xpanstep = 0;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 66cfc3e9d3cf..696ac5431180 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2042,7 +2042,7 @@ static int __init pxafb_setup_options(void)
return -ENODEV;
if (options)
- strlcpy(g_options, options, sizeof(g_options));
+ strscpy(g_options, options, sizeof(g_options));
return 0;
}
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 079a2a7fb2c5..964bc88bb89c 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -133,7 +133,7 @@ static struct platform_device q40fb_device = {
.name = "q40fb",
};
-int __init q40fb_init(void)
+static int __init q40fb_init(void)
{
int ret = 0;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index b93c8eb02336..67b63a753cb2 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -248,7 +248,7 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
{
struct s3fb_info *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -905,6 +905,8 @@ static int s3fb_set_par(struct fb_info *info)
value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index e31cf63b0a62..017c8efe8267 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1224,47 +1224,6 @@ int __init sa1100fb_init(void)
return platform_driver_register(&sa1100fb_driver);
}
-int __init sa1100fb_setup(char *options)
-{
-#if 0
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
-
- if (!strncmp(this_opt, "bpp:", 4))
- current_par.max_bpp =
- simple_strtoul(this_opt + 4, NULL, 0);
-
- if (!strncmp(this_opt, "lccr0:", 6))
- lcd_shadow.lccr0 =
- simple_strtoul(this_opt + 6, NULL, 0);
- if (!strncmp(this_opt, "lccr1:", 6)) {
- lcd_shadow.lccr1 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_xres =
- (lcd_shadow.lccr1 & 0x3ff) + 16;
- }
- if (!strncmp(this_opt, "lccr2:", 6)) {
- lcd_shadow.lccr2 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_yres =
- (lcd_shadow.
- lccr0 & LCCR0_SDS) ? ((lcd_shadow.
- lccr2 & 0x3ff) +
- 1) *
- 2 : ((lcd_shadow.lccr2 & 0x3ff) + 1);
- }
- if (!strncmp(this_opt, "lccr3:", 6))
- lcd_shadow.lccr3 =
- simple_strtoul(this_opt + 6, NULL, 0);
- }
-#endif
- return 0;
-}
-
module_init(sa1100fb_init);
MODULE_DESCRIPTION("StrongARM-1100/1110 framebuffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index cf2a90ecd64e..e770b4a356b5 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -355,7 +355,7 @@ static int simplefb_regulators_get(struct simplefb_par *par,
if (!p || p == prop->name)
continue;
- strlcpy(name, prop->name,
+ strscpy(name, prop->name,
strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1);
regulator = devm_regulator_get_optional(&pdev->dev, name);
if (IS_ERR(regulator)) {
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index b568c646a76c..2ba91d62af92 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -355,12 +355,12 @@ SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay,
}
break;
case 400:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDheight >= 600))) {
if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth];
}
break;
case 512:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDwidth >= 768))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDheight >= 768))) {
if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth];
}
break;
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index f28fd69d5eb7..c9e77429dfa3 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -649,37 +649,37 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
u16 xres=0, yres, myres;
#ifdef CONFIG_FB_SIS_300
- if(ivideo->sisvga_engine == SIS_300_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS300))
+ if (ivideo->sisvga_engine == SIS_300_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS300))
return -1 ;
}
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS315))
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS315))
return -1;
}
#endif
myres = sisbios_mode[myindex].yres;
- switch(vbflags & VB_DISPTYPE_DISP2) {
+ switch (vbflags & VB_DISPTYPE_DISP2) {
case CRT2_LCD:
xres = ivideo->lcdxres; yres = ivideo->lcdyres;
- if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
- (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
- if(sisbios_mode[myindex].xres > xres)
+ if ((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
+ (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
+ if (sisbios_mode[myindex].xres > xres)
return -1;
- if(myres > yres)
+ if (myres > yres)
return -1;
}
- if(ivideo->sisfb_fstn) {
- if(sisbios_mode[myindex].xres == 320) {
- if(myres == 240) {
- switch(sisbios_mode[myindex].mode_no[1]) {
+ if (ivideo->sisfb_fstn) {
+ if (sisbios_mode[myindex].xres == 320) {
+ if (myres == 240) {
+ switch (sisbios_mode[myindex].mode_no[1]) {
case 0x50: myindex = MODE_FSTN_8; break;
case 0x56: myindex = MODE_FSTN_16; break;
case 0x53: return -1;
@@ -688,7 +688,7 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
}
}
- if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn,
ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) {
return -1;
@@ -696,14 +696,14 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
break;
case CRT2_TV:
- if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
break;
case CRT2_VGA:
- if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
@@ -1872,7 +1872,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
+ strscpy(fix->id, ivideo->myid, sizeof(fix->id));
mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
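
This and the later strlcpy() conversions in the series follow the tree-wide move to strscpy(). Unlike strlcpy(), which returns strlen(src) and therefore has to read the whole source even when the destination is smaller, strscpy() stops at the destination bound and reports truncation. A hedged sketch of the difference; the pr_warn() is illustrative only:

	ssize_t len;

	len = strscpy(fix->id, ivideo->myid, sizeof(fix->id));
	if (len == -E2BIG)	/* truncated; dest is still NUL-terminated */
		pr_warn("sisfb: id string truncated\n");
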
@@ -2204,82 +2204,88 @@ static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
static void sisfb_sense_crt1(struct sis_video_info *ivideo)
{
- bool mustwait = false;
- u8 sr1F, cr17;
+ bool mustwait = false;
+ u8 sr1F, cr17;
#ifdef CONFIG_FB_SIS_315
- u8 cr63=0;
+ u8 cr63 = 0;
#endif
- u16 temp = 0xffff;
- int i;
+ u16 temp = 0xffff;
+ int i;
+
+ sr1F = SiS_GetReg(SISSR, 0x1F);
+ SiS_SetRegOR(SISSR, 0x1F, 0x04);
+ SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- sr1F = SiS_GetReg(SISSR, 0x1F);
- SiS_SetRegOR(SISSR, 0x1F, 0x04);
- SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- if(sr1F & 0xc0) mustwait = true;
+ if (sr1F & 0xc0)
+ mustwait = true;
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
- cr63 &= 0x40;
- SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
+ cr63 &= 0x40;
+ SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
+ }
#endif
- cr17 = SiS_GetReg(SISCR, 0x17);
- cr17 &= 0x80;
- if(!cr17) {
- SiS_SetRegOR(SISCR, 0x17, 0x80);
- mustwait = true;
- SiS_SetReg(SISSR, 0x00, 0x01);
- SiS_SetReg(SISSR, 0x00, 0x03);
- }
+ cr17 = SiS_GetReg(SISCR, 0x17);
+ cr17 &= 0x80;
- if(mustwait) {
- for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo);
- }
+ if (!cr17) {
+ SiS_SetRegOR(SISCR, 0x17, 0x80);
+ mustwait = true;
+ SiS_SetReg(SISSR, 0x00, 0x01);
+ SiS_SetReg(SISSR, 0x00, 0x03);
+ }
+ if (mustwait) {
+ for (i = 0; i < 10; i++)
+ sisfbwaitretracecrt1(ivideo);
+ }
#ifdef CONFIG_FB_SIS_315
- if(ivideo->chip >= SIS_330) {
- SiS_SetRegAND(SISCR, 0x32, ~0x20);
- if(ivideo->chip >= SIS_340) {
- SiS_SetReg(SISCR, 0x57, 0x4a);
- } else {
- SiS_SetReg(SISCR, 0x57, 0x5f);
- }
- SiS_SetRegOR(SISCR, 0x53, 0x02);
- while ((SiS_GetRegByte(SISINPSTAT)) & 0x01) break;
- while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01)) break;
- if ((SiS_GetRegByte(SISMISCW)) & 0x10) temp = 1;
- SiS_SetRegAND(SISCR, 0x53, 0xfd);
- SiS_SetRegAND(SISCR, 0x57, 0x00);
- }
+ if (ivideo->chip >= SIS_330) {
+ SiS_SetRegAND(SISCR, 0x32, ~0x20);
+ if (ivideo->chip >= SIS_340)
+ SiS_SetReg(SISCR, 0x57, 0x4a);
+ else
+ SiS_SetReg(SISCR, 0x57, 0x5f);
+
+ SiS_SetRegOR(SISCR, 0x53, 0x02);
+ while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)
+ break;
+ while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01))
+ break;
+ if ((SiS_GetRegByte(SISMISCW)) & 0x10)
+ temp = 1;
+
+ SiS_SetRegAND(SISCR, 0x53, 0xfd);
+ SiS_SetRegAND(SISCR, 0x57, 0x00);
+ }
#endif
- if(temp == 0xffff) {
- i = 3;
- do {
- temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
- ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
- } while(((temp == 0) || (temp == 0xffff)) && i--);
+ if (temp == 0xffff) {
+ i = 3;
- if((temp == 0) || (temp == 0xffff)) {
- if(sisfb_test_DDC1(ivideo)) temp = 1;
- }
- }
+ do {
+ temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
+ ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
+ } while (((temp == 0) || (temp == 0xffff)) && i--);
- if((temp) && (temp != 0xffff)) {
- SiS_SetRegOR(SISCR, 0x32, 0x20);
- }
+ if ((temp == 0) || (temp == 0xffff)) {
+ if (sisfb_test_DDC1(ivideo))
+ temp = 1;
+ }
+ }
+
+ if ((temp) && (temp != 0xffff))
+ SiS_SetRegOR(SISCR, 0x32, 0x20);
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA)
+ SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
#endif
- SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
-
- SiS_SetReg(SISSR, 0x1F, sr1F);
+ SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
+ SiS_SetReg(SISSR, 0x1F, sr1F);
}
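
The sisfb_sense_crt1() rewrite above is a pure coding-style pass; the register access sequence is unchanged. A compressed sketch of the conventions applied (see Documentation/process/coding-style.rst):

	if (sr1F & 0xc0)		/* space after keywords, test on its own line */
		mustwait = true;	/* single statement: no braces */

	for (i = 0; i < 10; i++)	/* spaces around operators and after ';' */
		sisfbwaitretracecrt1(ivideo);
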
/* Determine and detect attached devices on SiS30x */
@@ -2293,25 +2299,25 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
ivideo->SiS_Pr.PanelSelfDetected = false;
/* LCD detection only for TMDS bridges */
- if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
+ if (!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
return;
- if(ivideo->vbflags2 & VB2_30xBDH)
+ if (ivideo->vbflags2 & VB2_30xBDH)
return;
/* If LCD already set up by BIOS, skip it */
reg = SiS_GetReg(SISCR, 0x32);
- if(reg & 0x08)
+ if (reg & 0x08)
return;
realcrtno = 1;
- if(ivideo->SiS_Pr.DDCPortMixup)
+ if (ivideo->SiS_Pr.DDCPortMixup)
realcrtno = 0;
/* Check DDC capabilities */
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
realcrtno, 0, &buffer[0], ivideo->vbflags2);
- if((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
+ if ((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
return;
/* Read DDC data */
@@ -2320,17 +2326,17 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
ivideo->sisvga_engine, realcrtno, 1,
&buffer[0], ivideo->vbflags2);
- } while((temp) && i--);
+ } while ((temp) && i--);
- if(temp)
+ if (temp)
return;
/* No digital device */
- if(!(buffer[0x14] & 0x80))
+ if (!(buffer[0x14] & 0x80))
return;
/* First detailed timing preferred timing? */
- if(!(buffer[0x18] & 0x02))
+ if (!(buffer[0x18] & 0x02))
return;
xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);
@@ -2338,26 +2344,26 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
switch(xres) {
case 1024:
- if(yres == 768)
+ if (yres == 768)
paneltype = 0x02;
break;
case 1280:
- if(yres == 1024)
+ if (yres == 1024)
paneltype = 0x03;
break;
case 1600:
- if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
+ if ((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
paneltype = 0x0b;
break;
}
- if(!paneltype)
+ if (!paneltype)
return;
- if(buffer[0x23])
+ if (buffer[0x23])
cr37 |= 0x10;
- if((buffer[0x47] & 0x18) == 0x18)
+ if ((buffer[0x47] & 0x18) == 0x18)
cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20);
else
cr37 |= 0xc0;
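
For context, the xres/yres decode in this function follows the EDID detailed timing descriptor layout: the low byte of the active size plus a high nibble packed into a following byte. A sketch, assuming buffer[] holds a raw EDID block with the first descriptor at offset 0x36:

	xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);	/* horizontal active */
	yres = buffer[0x3b] | ((buffer[0x3d] & 0xf0) << 4);	/* vertical active */
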
@@ -2372,31 +2378,34 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
{
- int temp, mytest, result, i, j;
-
- for(j = 0; j < 10; j++) {
- result = 0;
- for(i = 0; i < 3; i++) {
- mytest = test;
- SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
- temp = (type >> 8) | (mytest & 0x00ff);
- SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
- mytest >>= 8;
- mytest &= 0x7f;
- temp = SiS_GetReg(SISPART4, 0x03);
- temp ^= 0x0e;
- temp &= mytest;
- if(temp == mytest) result++;
+ int temp, mytest, result, i, j;
+
+ for (j = 0; j < 10; j++) {
+ result = 0;
+ for (i = 0; i < 3; i++) {
+ mytest = test;
+ SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
+ temp = (type >> 8) | (mytest & 0x00ff);
+ SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
+ mytest >>= 8;
+ mytest &= 0x7f;
+ temp = SiS_GetReg(SISPART4, 0x03);
+ temp ^= 0x0e;
+ temp &= mytest;
+ if (temp == mytest)
+ result++;
#if 1
- SiS_SetReg(SISPART4, 0x11, 0x00);
- SiS_SetRegAND(SISPART4, 0x10, 0xe0);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
+ SiS_SetReg(SISPART4, 0x11, 0x00);
+ SiS_SetRegAND(SISPART4, 0x10, 0xe0);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
#endif
- }
- if((result == 0) || (result >= 2)) break;
- }
- return result;
+ }
+
+ if ((result == 0) || (result >= 2))
+ break;
+ }
+ return result;
}
static void SiS_Sense30x(struct sis_video_info *ivideo)
@@ -4262,18 +4271,17 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
- for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
-
+ for (k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
- if(RankCapacity != PseudoRankCapacity)
+ if (RankCapacity != PseudoRankCapacity)
continue;
- if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
+ if ((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
continue;
BankNumHigh = RankCapacity * 16 * iteration - 1;
- if(iteration == 3) { /* Rank No */
+ if (iteration == 3) { /* Rank No */
BankNumMid = RankCapacity * 16 - 1;
} else {
BankNumMid = RankCapacity * 16 * iteration / 2 - 1;
@@ -4287,18 +4295,22 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
SiS_SetRegAND(SISSR, 0x15, 0xFB); /* Test */
SiS_SetRegOR(SISSR, 0x15, 0x04); /* Test */
sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;
- if(buswidth == 4) sr14 |= 0x80;
- else if(buswidth == 2) sr14 |= 0x40;
+
+ if (buswidth == 4)
+ sr14 |= 0x80;
+ else if (buswidth == 2)
+ sr14 |= 0x40;
+
SiS_SetReg(SISSR, 0x13, SiS_DRAMType[k][4]);
SiS_SetReg(SISSR, 0x14, sr14);
BankNumHigh <<= 16;
BankNumMid <<= 16;
- if((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
- (BankNumMid + PhysicalAdrHigh >= mapsize) ||
- (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
- (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
+ if ((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
+ (BankNumMid + PhysicalAdrHigh >= mapsize) ||
+ (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
+ (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
continue;
/* Write data */
@@ -4312,7 +4324,7 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
(FBAddr + BankNumHigh + PhysicalAdrOtherPage));
/* Read data */
- if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
+ if (readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
return 1;
}
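
The probe above is a classic write/read-back DRAM sizing test: candidate bank and page addresses are written through the framebuffer mapping, and a matching read-back confirms that the candidate geometry decodes. A minimal sketch, with FBAddr assumed to be the ioremapped framebuffer base:

	writew(PhysicalAdrHigh, FBAddr + BankNumHigh + PhysicalAdrHigh);

	if (readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
		return 1;	/* address decodes: this geometry candidate fits */
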
@@ -5867,7 +5879,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ivideo->cardnumber++;
}
- strlcpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
+ strscpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
ivideo->warncount = 0;
ivideo->chip_id = pdev->device;
@@ -6150,24 +6162,20 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
int result = 1;
- /* if((ivideo->chip == SIS_315H) ||
- (ivideo->chip == SIS_315) ||
- (ivideo->chip == SIS_315PRO) ||
- (ivideo->chip == SIS_330)) {
- sisfb_post_sis315330(pdev);
- } else */ if(ivideo->chip == XGI_20) {
+
+ if (ivideo->chip == XGI_20) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
- } else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
+ } else if ((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
} else {
printk(KERN_INFO "sisfb: Card is not "
"POSTed and sisfb can't do this either.\n");
}
- if(!result) {
+ if (!result) {
printk(KERN_ERR "sisfb: Failed to POST card\n");
ret = -ENODEV;
goto error_3;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index d119b1d08007..8ab9a3fbd281 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -131,8 +131,6 @@ static struct fb_info info;
*/
static struct xxx_par __initdata current_par;
-int xxxfb_init(void);
-
/**
* xxxfb_open - Optional function. Called when the framebuffer is
* first accessed.
@@ -886,7 +884,7 @@ static struct pci_driver xxxfb_driver = {
MODULE_DEVICE_TABLE(pci, xxxfb_id_table);
-int __init xxxfb_init(void)
+static int __init xxxfb_init(void)
{
/*
* For kernel boot options (in 'video=xxxfb:<options>' format)
@@ -967,7 +965,7 @@ static struct platform_device *xxxfb_device;
* Only necessary if your driver takes special options,
* otherwise we fall back on the generic fb_setup().
*/
-int __init xxxfb_setup(char *options)
+static int __init xxxfb_setup(char *options)
{
/* Parse user specified options (`video=xxxfb:') */
}
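
Making xxxfb_init() and xxxfb_setup() static removes the last external references in the sample driver; a module init function only needs to be visible to module_init(). A sketch of the resulting shape, with the driver struct name taken from the file:

static int __init xxxfb_init(void)
{
	/* parse video=xxxfb:... options, then register the driver */
	return pci_register_driver(&xxxfb_driver);
}
module_init(xxxfb_init);
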
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 6a52eba64559..fce6cfbadfd6 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1719,7 +1719,7 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
enable = 0;
}
- strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id));
+ strscpy(fb->fix.id, fbname, sizeof(fb->fix.id));
memcpy(&par->ops,
(head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 5c765655d000..52e4ed9da78c 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -450,7 +450,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
if (ret < 0)
return ret;
- /* Set Set Area Color Mode ON/OFF & Low Power Display Mode */
+ /* Set Area Color Mode ON/OFF & Low Power Display Mode */
if (par->area_color_enable || par->low_power) {
u32 mode;
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 27d4b0ace2d6..cd4d640f9477 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1382,7 +1382,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
sst_get_memsize(info, &fix->smem_len);
- strlcpy(fix->id, spec->name, sizeof(fix->id));
+ strscpy(fix->id, spec->name, sizeof(fix->id));
printk(KERN_INFO "%s (revision %d) with %s dac\n",
fix->id, par->revision, par->dac_sw.name);
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 15b079505a00..490bd9a14763 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -80,7 +80,7 @@ static int gfb_set_fbinfo(struct gfb_info *gp)
info->pseudo_palette = gp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
+ strscpy(info->fix.id, "gfb", sizeof(info->fix.id));
info->fix.smem_start = gp->fb_base_phys;
info->fix.smem_len = gp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 1d3bacd9d5ac..1279b02234f8 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -84,7 +84,7 @@ static int s3d_set_fbinfo(struct s3d_info *sp)
info->pseudo_palette = sp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "s3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "s3d", sizeof(info->fix.id));
info->fix.smem_start = sp->fb_base_phys;
info->fix.smem_len = sp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index 9daf17b11106..f7b463633ba0 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -207,7 +207,7 @@ static int e3d_set_fbinfo(struct e3d_info *ep)
info->pseudo_palette = ep->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "e3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "e3d", sizeof(info->fix.id));
info->fix.smem_start = ep->fb_base_phys;
info->fix.smem_len = ep->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 1638a40fed22..01d87f53324d 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -333,7 +333,7 @@ tcx_init_fix(struct fb_info *info, int linebytes)
else
tcx_name = "TCX24";
- strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tcx_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 67e37a62b07c..8a8122f8bfeb 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1264,7 +1264,7 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = I2C_CLASS_DDC;
chan->adapter.algo_data = &chan->algo;
@@ -1293,7 +1293,7 @@ static int tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = dev;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index ae0cf5540636..1fff5fd7ab51 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -1344,7 +1344,7 @@ tgafb_init_fix(struct fb_info *info)
memory_size = 16777216;
}
- strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 319131bd72cf..cda095420ee8 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -270,7 +270,7 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
{
struct tridentfb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index a6c9d4f26669..1007023a5e88 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -90,11 +90,7 @@ struct fb_info_valkyrie {
u32 pseudo_palette[16];
};
-/*
- * Exported functions
- */
-int valkyriefb_init(void);
-int valkyriefb_setup(char*);
+static int valkyriefb_setup(char*);
static int valkyriefb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
@@ -302,7 +298,7 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
default_vmode, default_cmode);
}
-int __init valkyriefb_init(void)
+static int __init valkyriefb_init(void)
{
struct fb_info_valkyrie *p;
unsigned long frame_buffer_phys, cmap_regs_phys;
@@ -549,7 +545,7 @@ static int __init valkyrie_init_info(struct fb_info *info,
/*
* Parse user specified options (`video=valkyriefb:')
*/
-int __init valkyriefb_setup(char *options)
+static int __init valkyriefb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index a92a8c670cf0..4274c6efb249 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -507,6 +507,8 @@ static int vt8623fb_set_par(struct fb_info *info)
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
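
The added clamp keeps memset_io() from running past the mapped framebuffer when the mode computation yields a value larger than what was ioremapped. The same guard can be written with the min_t() macro (type chosen for the sketch to match fb_info::screen_size):

	screen_size = min_t(unsigned long, screen_size, info->screen_size);
	memset_io(info->screen_base, 0x00, screen_size);
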
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 56c77f63cd22..0a53a61231c2 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -35,11 +35,12 @@ if VIRTIO_MENU
config VIRTIO_HARDEN_NOTIFICATION
bool "Harden virtio notification"
+ depends on BROKEN
help
Enable this to harden the device notifications and suppress
those that happen at a time where notifications are illegal.
- Experimental: Note that several drivers still have bugs that
+ Experimental: Note that several drivers still have issues that
may cause crashes or hangs when correct handling of
notifications is enforced; depending on the subset of
drivers and devices you use, this may or may not work.
@@ -126,9 +127,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
- This driver was only tested under x86-64 and arm64, but should
- theoretically work on all architectures that support memory hotplug
- and hotremove.
+ This driver currently only supports x86-64 and arm64. Although it
+ should compile on other architectures that implement memory
+ hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to work as
+ expected.
If unsure, say M.
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 14c142d77fba..828ced060742 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -428,7 +428,9 @@ int register_virtio_device(struct virtio_device *dev)
goto out;
dev->index = err;
- dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = dev_set_name(&dev->dev, "virtio%u", dev->index);
+ if (err)
+ goto out_ida_remove;
err = virtio_device_of_init(dev);
if (err)
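
dev_set_name() allocates the name string and can fail, so its return value now unwinds the index allocated just above instead of continuing with an unnamed device. A sketch of the full pattern; the unwind label and ida call are assumed to match the function's existing error path:

	err = dev_set_name(&dev->dev, "virtio%u", dev->index);
	if (err)
		goto out_ida_remove;

	/* ... rest of registration ... */

out_ida_remove:
	ida_simple_remove(&virtio_index_ida, dev->index);	/* assumed unwind */
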
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bd360b91e9d3..3f78a3a1eb75 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -856,7 +856,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
vb->shrinker.count_objects = virtio_balloon_shrinker_count;
vb->shrinker.seeks = DEFAULT_SEEKS;
- return register_shrinker(&vb->shrinker);
+ return register_shrinker(&vb->shrinker, "virtio-balloon");
}
static int virtballoon_probe(struct virtio_device *vdev)
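
register_shrinker() grew a printf-style name argument in this cycle; with CONFIG_SHRINKER_DEBUG the name identifies the shrinker under debugfs. A hedged sketch of the new signature, the per-instance variant being illustrative:

	/* The name is printf-formatted, so per-instance names are possible. */
	return register_shrinker(&vb->shrinker, "virtio-balloon-%d",
				 vb->vdev->index);
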
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index e07486f01999..0c2892ec6817 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -862,8 +862,7 @@ static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
unsigned long mb_id,
unsigned long start_pfn)
{
- const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(start_pfn));
int new_state;
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
@@ -1158,8 +1157,7 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
*/
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
- const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
int rc, retry_count;
/*
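
The two hunks above replace an open-coded zone test with the is_zone_movable_page() helper. For reference, the helper is expected to be the obvious one-liner (definition assumed from include/linux/mmzone.h):

static inline bool is_zone_movable_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_MOVABLE;
}
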
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 083ff1eb743d..3ff746e3f24a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -403,6 +403,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
+ vq->num_max = num;
+
/* Activate the queue */
writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
@@ -487,6 +489,9 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
if (err)
return err;
+ if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
+ enable_irq_wake(irq);
+
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
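
Two independent additions here: the transport records the device's ring-size ceiling in vq->num_max, and a "wakeup-source" DT property arms the interrupt as a wakeup source. The ceiling is what later lets the core reject oversized resize requests, roughly:

	vq->num_max = num;		/* recorded once at setup_vq() time */

	/* ... later, in virtqueue_resize(): */
	if (num > vq->vq.num_max)
		return -E2BIG;
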
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ca51fcc9daab..ad258a9d3b9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -214,9 +214,15 @@ static void vp_del_vq(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+ /*
+	 * If the vq is still in the reset state (e.g. because re-enabling
+	 * it failed), info->node was never rejoined to the virtqueue list,
+	 * so skip the list_del() and avoid unexpected irqs.
+ */
+ if (!vq->reset) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ }
vp_dev->del_vq(info);
kfree(info);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index a5e5721145c7..2257f1b3d8ae 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -135,6 +135,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
+ vq->num_max = num;
+
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 623906b4996c..c3b9f2761849 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+ if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+ __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
}
/* virtio config->finalize_features() implementation */
@@ -176,6 +179,110 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
+static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ unsigned long index;
+
+ index = vq->index;
+
+ /* activate the queue */
+ vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
+ vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
+ virtqueue_get_avail_addr(vq),
+ virtqueue_get_used_addr(vq));
+
+ if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+ msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
+ if (msix_vec == VIRTIO_MSI_NO_VECTOR)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+ return -ENOENT;
+
+ vp_modern_set_queue_reset(mdev, vq->index);
+
+ info = vp_dev->vqs[vq->index];
+
+ /* delete vq from irq handler */
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ INIT_LIST_HEAD(&info->node);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_break(vq);
+#endif
+
+ /* For the case where vq has an exclusive irq, call synchronize_irq() to
+ * wait for completion.
+ *
+ * note: We can't use disable_irq() since it conflicts with the affinity
+ * managed IRQ that is used by some drivers.
+ */
+ if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+ vq->reset = true;
+
+ return 0;
+}
+
+static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags, index;
+ int err;
+
+ if (!vq->reset)
+ return -EBUSY;
+
+ index = vq->index;
+ info = vp_dev->vqs[index];
+
+ if (vp_modern_get_queue_reset(mdev, index))
+ return -EBUSY;
+
+ if (vp_modern_get_queue_enable(mdev, index))
+ return -EBUSY;
+
+ err = vp_active_vq(vq, info->msix_vector);
+ if (err)
+ return err;
+
+ if (vq->callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_unbreak(vq);
+#endif
+
+ vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+ vq->reset = false;
+
+ return 0;
+}
+
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -218,32 +325,21 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
- /* activate the queue */
- vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
- vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
- virtqueue_get_avail_addr(vq),
- virtqueue_get_used_addr(vq));
+ vq->num_max = num;
+
+ err = vp_active_vq(vq, msix_vec);
+ if (err)
+ goto err;
vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
- goto err_map_notify;
- }
-
- if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
- if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto err_assign_vector;
- }
+ goto err;
}
return vq;
-err_assign_vector:
- if (!mdev->notify_base)
- pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
-err_map_notify:
+err:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -401,6 +497,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -419,6 +517,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
/* the PCI probing function */
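
Factoring the activation sequence into vp_active_vq() lets first-time setup_vq() and vp_modern_enable_vq_after_reset() share one programming path. A sketch of the shared sequence, calls as in the hunks above:

	/* Program size, ring addresses and the MSI-X vector, then enable. */
	err = vp_active_vq(vq, info->msix_vector);
	if (err)
		return err;
	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
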
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index fa2a9445bb18..869cb46bef96 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -3,6 +3,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
/*
* vp_modern_map_capability - map a part of virtio pci capability
@@ -475,6 +476,44 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
EXPORT_SYMBOL_GPL(vp_modern_set_status);
/*
+ * vp_modern_get_queue_reset - get the queue reset status
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ return vp_ioread16(&cfg->queue_reset);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);
+
+/*
+ * vp_modern_set_queue_reset - reset the queue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ vp_iowrite16(1, &cfg->queue_reset);
+
+ while (vp_ioread16(&cfg->queue_reset))
+ msleep(1);
+
+ while (vp_ioread16(&cfg->cfg.queue_enable))
+ msleep(1);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
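
The casts above rely on the VIRTIO 1.2 extension of the common PCI config space; queue_reset sits past the 1.0 layout. The expected structure, assumed from include/linux/virtio_pci_modern.h:

struct virtio_pci_modern_common_cfg {
	struct virtio_pci_common_cfg cfg;	/* the VIRTIO 1.0 layout */

	__le16 queue_notify_data;		/* read-write */
	__le16 queue_reset;			/* read-write */
};
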
+
+/*
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 643ca779fcc6..4620e9d79dde 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -85,6 +85,71 @@ struct vring_desc_extra {
u16 next; /* The next desc state in a list. */
};
+struct vring_virtqueue_split {
+ /* Actual memory layout for this queue. */
+ struct vring vring;
+
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /*
+ * Last written value to avail->idx in
+ * guest byte order.
+ */
+ u16 avail_idx_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+
+ /*
+	 * The creation-time parameters are kept so that a replacement
+	 * vring can be allocated with the same constraints on resize.
+ */
+ u32 vring_align;
+ bool may_reduce_num;
+};
+
+struct vring_virtqueue_packed {
+ /* Actual memory layout for this queue. */
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Avail used flags. */
+ u16 avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ u16 next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ u16 event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+};
+
struct vring_virtqueue {
struct virtqueue vq;
@@ -124,64 +189,10 @@ struct vring_virtqueue {
union {
/* Available for split ring */
- struct {
- /* Actual memory layout for this queue. */
- struct vring vring;
-
- /* Last written value to avail->flags */
- u16 avail_flags_shadow;
-
- /*
- * Last written value to avail->idx in
- * guest byte order.
- */
- u16 avail_idx_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_split *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t queue_dma_addr;
- size_t queue_size_in_bytes;
- } split;
+ struct vring_virtqueue_split split;
/* Available for packed ring */
- struct {
- /* Actual memory layout for this queue. */
- struct {
- unsigned int num;
- struct vring_packed_desc *desc;
- struct vring_packed_desc_event *driver;
- struct vring_packed_desc_event *device;
- } vring;
-
- /* Driver ring wrap counter. */
- bool avail_wrap_counter;
-
- /* Avail used flags. */
- u16 avail_used_flags;
-
- /* Index of the next avail descriptor. */
- u16 next_avail_idx;
-
- /*
- * Last written value to driver->flags in
- * guest byte order.
- */
- u16 event_flags_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
- } packed;
+ struct vring_virtqueue_packed packed;
};
/* How to notify other side. FIXME: commonalize hcalls! */
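
Pulling the split and packed state out into named struct types is what enables the resize path later in this patch: a complete replacement ring can be staged in a stack-local instance and committed with a single struct assignment. Roughly:

	struct vring_virtqueue_split vring_split = {};

	/* allocate and initialize the replacement ring off to the side */
	err = vring_alloc_queue_split(&vring_split, vdev, num,
				      vq->split.vring_align,
				      vq->split.may_reduce_num);
	if (!err)
		vq->split = vring_split;	/* commit: one struct copy */
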
@@ -200,6 +211,16 @@ struct vring_virtqueue {
#endif
};
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
+static void vring_free(struct virtqueue *_vq);
/*
* Helpers.
@@ -364,6 +385,24 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
return dma_mapping_error(vring_dma_dev(vq), addr);
}
+static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
+{
+ vq->vq.num_free = num;
+
+ if (vq->packed_ring)
+ vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+ else
+ vq->last_used_idx = 0;
+
+ vq->event_triggered = false;
+ vq->num_added = 0;
+
+#ifdef DEBUG
+ vq->in_use = false;
+ vq->last_add_time_valid = false;
+#endif
+}
+
/*
* Split ring specific functions - *_split().
@@ -907,28 +946,107 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
return NULL;
}
-static struct virtqueue *vring_create_virtqueue_split(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
+ struct vring_virtqueue *vq)
+{
+ struct virtio_device *vdev;
+
+ vdev = vq->vq.vdev;
+
+ vring_split->avail_flags_shadow = 0;
+ vring_split->avail_idx_shadow = 0;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!vq->vq.callback) {
+ vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
+ vring_split->avail_flags_shadow);
+ }
+}
+
+static void virtqueue_reinit_split(struct vring_virtqueue *vq)
+{
+ int num;
+
+ num = vq->split.vring.num;
+
+ vq->split.vring.avail->flags = 0;
+ vq->split.vring.avail->idx = 0;
+
+ /* reset avail event */
+ vq->split.vring.avail->ring[num] = 0;
+
+ vq->split.vring.used->flags = 0;
+ vq->split.vring.used->idx = 0;
+
+ /* reset used event */
+ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
+
+ virtqueue_init(vq, num);
+
+ virtqueue_vring_init_split(&vq->split, vq);
+}
+
+static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
+ struct vring_virtqueue_split *vring_split)
+{
+ vq->split = *vring_split;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
+{
+ struct vring_desc_state_split *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_split->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
+ if (!state)
+ goto err_state;
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_extra;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_split));
+
+ vring_split->desc_state = state;
+ vring_split->desc_extra = extra;
+ return 0;
+
+err_extra:
+ kfree(state);
+err_state:
+ return -ENOMEM;
+}
+
+static void vring_free_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev)
+{
+ vring_free_queue(vdev, vring_split->queue_size_in_bytes,
+ vring_split->vring.desc,
+ vring_split->queue_dma_addr);
+
+ kfree(vring_split->desc_state);
+ kfree(vring_split->desc_extra);
+}
+
+static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ u32 num,
+ unsigned int vring_align,
+ bool may_reduce_num)
{
- struct virtqueue *vq;
void *queue = NULL;
dma_addr_t dma_addr;
- size_t queue_size_in_bytes;
- struct vring vring;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
+ return -EINVAL;
}
/* TODO: allocate each queue chunk individually */
@@ -939,11 +1057,11 @@ static struct virtqueue *vring_create_virtqueue_split(
if (queue)
break;
if (!may_reduce_num)
- return NULL;
+ return -ENOMEM;
}
if (!num)
- return NULL;
+ return -ENOMEM;
if (!queue) {
/* Try to get a single page. You are my only hope! */
@@ -951,26 +1069,85 @@ static struct virtqueue *vring_create_virtqueue_split(
&dma_addr, GFP_KERNEL|__GFP_ZERO);
}
if (!queue)
- return NULL;
+ return -ENOMEM;
+
+ vring_init(&vring_split->vring, num, queue, vring_align);
+
+ vring_split->queue_dma_addr = dma_addr;
+ vring_split->queue_size_in_bytes = vring_size(num, vring_align);
+
+ vring_split->vring_align = vring_align;
+ vring_split->may_reduce_num = may_reduce_num;
+
+ return 0;
+}
+
+static struct virtqueue *vring_create_virtqueue_split(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct virtqueue *vq;
+ int err;
- queue_size_in_bytes = vring_size(num, vring_align);
- vring_init(&vring, num, queue, vring_align);
+ err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
+ may_reduce_num);
+ if (err)
+ return NULL;
- vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
if (!vq) {
- vring_free_queue(vdev, queue_size_in_bytes, queue,
- dma_addr);
+ vring_free_split(&vring_split, vdev);
return NULL;
}
- to_vvq(vq)->split.queue_dma_addr = dma_addr;
- to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
to_vvq(vq)->we_own_ring = true;
return vq;
}
+static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ err = vring_alloc_queue_split(&vring_split, vdev, num,
+ vq->split.vring_align,
+ vq->split.may_reduce_num);
+ if (err)
+ goto err;
+
+ err = vring_alloc_state_extra_split(&vring_split);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_split(&vring_split, vq);
+
+ virtqueue_init(vq, vring_split.vring.num);
+ virtqueue_vring_attach_split(vq, &vring_split);
+
+ return 0;
+
+err_state_extra:
+ vring_free_split(&vring_split, vdev);
+err:
+ virtqueue_reinit_split(vq);
+ return -ENOMEM;
+}
+
/*
* Packed ring specific functions - *_packed().
@@ -1637,8 +1814,7 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
return NULL;
}
-static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
- unsigned int num)
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
struct vring_desc_extra *desc_extra;
unsigned int i;
@@ -1656,19 +1832,32 @@ static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *v
return desc_extra;
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev)
+{
+ if (vring_packed->vring.desc)
+ vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
+ vring_packed->vring.desc,
+ vring_packed->ring_dma_addr);
+
+ if (vring_packed->vring.driver)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.driver,
+ vring_packed->driver_event_dma_addr);
+
+ if (vring_packed->vring.device)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.device,
+ vring_packed->device_event_dma_addr);
+
+ kfree(vring_packed->desc_state);
+ kfree(vring_packed->desc_extra);
+}
+
+static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ u32 num)
{
- struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
@@ -1680,7 +1869,11 @@ static struct virtqueue *vring_create_virtqueue_packed(
&ring_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!ring)
- goto err_ring;
+ goto err;
+
+ vring_packed->vring.desc = ring;
+ vring_packed->ring_dma_addr = ring_dma_addr;
+ vring_packed->ring_size_in_bytes = ring_size_in_bytes;
event_size_in_bytes = sizeof(struct vring_packed_desc_event);
@@ -1688,13 +1881,112 @@ static struct virtqueue *vring_create_virtqueue_packed(
&driver_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!driver)
- goto err_driver;
+ goto err;
+
+ vring_packed->vring.driver = driver;
+ vring_packed->event_size_in_bytes = event_size_in_bytes;
+ vring_packed->driver_event_dma_addr = driver_event_dma_addr;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!device)
- goto err_device;
+ goto err;
+
+ vring_packed->vring.device = device;
+ vring_packed->device_event_dma_addr = device_event_dma_addr;
+
+ vring_packed->vring.num = num;
+
+ return 0;
+
+err:
+ vring_free_packed(vring_packed, vdev);
+ return -ENOMEM;
+}
+
+static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
+{
+ struct vring_desc_state_packed *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_packed->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
+ if (!state)
+ goto err_desc_state;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_packed));
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_desc_extra;
+
+ vring_packed->desc_state = state;
+ vring_packed->desc_extra = extra;
+
+ return 0;
+
+err_desc_extra:
+ kfree(state);
+err_desc_state:
+ return -ENOMEM;
+}
+
+static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
+ bool callback)
+{
+ vring_packed->next_avail_idx = 0;
+ vring_packed->avail_wrap_counter = 1;
+ vring_packed->event_flags_shadow = 0;
+ vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vring_packed->vring.driver->flags =
+ cpu_to_le16(vring_packed->event_flags_shadow);
+ }
+}
+
+static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
+ struct vring_virtqueue_packed *vring_packed)
+{
+ vq->packed = *vring_packed;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
+{
+ memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
+ memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
+
+	/* desc.flags must be cleared too; see is_used_desc_packed() for why. */
+ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
+
+ virtqueue_init(vq, vq->packed.vring.num);
+ virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
+}
+
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
@@ -1703,8 +1995,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -1713,15 +2005,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
- vq->event_triggered = false;
- vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -1730,65 +2015,58 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->packed.ring_dma_addr = ring_dma_addr;
- vq->packed.driver_event_dma_addr = driver_event_dma_addr;
- vq->packed.device_event_dma_addr = device_event_dma_addr;
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
- vq->packed.ring_size_in_bytes = ring_size_in_bytes;
- vq->packed.event_size_in_bytes = event_size_in_bytes;
+ virtqueue_vring_init_packed(&vring_packed, !!callback);
- vq->packed.vring.num = num;
- vq->packed.vring.desc = ring;
- vq->packed.vring.driver = driver;
- vq->packed.vring.device = device;
-
- vq->packed.next_avail_idx = 0;
- vq->packed.avail_wrap_counter = 1;
- vq->packed.event_flags_shadow = 0;
- vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
-
- vq->packed.desc_state = kmalloc_array(num,
- sizeof(struct vring_desc_state_packed),
- GFP_KERNEL);
- if (!vq->packed.desc_state)
- goto err_desc_state;
-
- memset(vq->packed.desc_state, 0,
- num * sizeof(struct vring_desc_state_packed));
-
- /* Put everything in free lists. */
- vq->free_head = 0;
-
- vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
- if (!vq->packed.desc_extra)
- goto err_desc_extra;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags =
- cpu_to_le16(vq->packed.event_flags_shadow);
- }
+ virtqueue_init(vq, num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_desc_extra:
- kfree(vq->packed.desc_state);
-err_desc_state:
+err_state_extra:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
-err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
-err_driver:
- vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
+ vring_free_packed(&vring_packed, vdev);
err_ring:
return NULL;
}
+static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
+
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
+
+ virtqueue_init(vq, vring_packed.vring.num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
+
+ return 0;
+
+err_state_extra:
+ vring_free_packed(&vring_packed, vdev);
+err_ring:
+ virtqueue_reinit_packed(vq);
+ return -ENOMEM;
+}
+
/*
* Generic functions and exported symbols.
@@ -2131,8 +2409,8 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
- * This is not valid on an active queue; it is useful only for device
- * shutdown.
+ * This is not valid on an active queue; it is useful only for device
+ * shutdown or while a queue is being reset.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
@@ -2148,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2180,16 +2466,17 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
struct vring_virtqueue *vq;
+ int err;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
@@ -2202,8 +2489,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = vring.num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -2212,14 +2499,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0;
- vq->event_triggered = false;
- vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2228,47 +2508,22 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->split.queue_dma_addr = 0;
- vq->split.queue_size_in_bytes = 0;
-
- vq->split.vring = vring;
- vq->split.avail_flags_shadow = 0;
- vq->split.avail_idx_shadow = 0;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
- vq->split.avail_flags_shadow);
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
}
- vq->split.desc_state = kmalloc_array(vring.num,
- sizeof(struct vring_desc_state_split), GFP_KERNEL);
- if (!vq->split.desc_state)
- goto err_state;
-
- vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
- if (!vq->split.desc_extra)
- goto err_extra;
+ virtqueue_vring_init_split(vring_split, vq);
- /* Put everything in free lists. */
- vq->free_head = 0;
- memset(vq->split.desc_state, 0, vring.num *
- sizeof(struct vring_desc_state_split));
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-
-err_extra:
- kfree(vq->split.desc_state);
-err_state:
- kfree(vq);
- return NULL;
}
-EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
unsigned int index,
@@ -2294,6 +2549,75 @@ struct virtqueue *vring_create_virtqueue(
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @num: new ring num
+ * @recycle: callback to recycle buffers that are no longer in use
+ *
+ * When a new vring really has to be created, the current vq is first put
+ * into the reset state and the passed callback is invoked to recycle every
+ * buffer that is no longer in use. The old vring is released only after the
+ * new one has been created successfully.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring; the original ring size is kept
+ *  and the vq can still work normally.
+ * -EBUSY: Failed to sync with the device; the vq may not work properly.
+ * -ENOENT: Transport or device does not support reset.
+ * -E2BIG/-EINVAL: @num is invalid (larger than num_max, or zero).
+ * -EPERM: Operation not permitted (the caller does not own this ring).
+ *
+ */
+int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (num > vq->vq.num_max)
+ return -E2BIG;
+
+ if (!num)
+ return -EINVAL;
+
+ if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
+ return 0;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ if (vq->packed_ring)
+ err = virtqueue_resize_packed(_vq, num);
+ else
+ err = virtqueue_resize_split(_vq, num);
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtqueue_resize);
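
A hedged usage sketch for the new API: the recycle callback takes ownership of every buffer the device had not consumed before the reset; the names below are illustrative.

static void demo_recycle(struct virtqueue *vq, void *buf)
{
	kfree(buf);	/* unconsumed buffer: ownership is back with the driver */
}

static int demo_shrink(struct virtqueue *vq)
{
	/* On -ENOMEM the old ring is kept and the vq remains usable. */
	return virtqueue_resize(vq, 128, demo_recycle);
}
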
+
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2306,25 +2630,21 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
- struct vring vring;
+ struct vring_virtqueue_split vring_split = {};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
- vring_init(&vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vring_init(&vring_split.vring, num, pages, vring_align);
+ return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
-void vring_del_virtqueue(struct virtqueue *_vq)
+static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- spin_lock(&vq->vq.vdev->vqs_list_lock);
- list_del(&_vq->list);
- spin_unlock(&vq->vq.vdev->vqs_list_lock);
-
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2355,6 +2675,18 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
+}
+
+void vring_del_virtqueue(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
+ vring_free(_vq);
+
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2402,6 +2734,30 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_break(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_break);
+
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_unbreak(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
+
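A sketch of how a transport core might pair these helpers around a device
reset so that drivers observe the queues as broken while the device is down
(my_transport_reset is a hypothetical name); the list walk mirrors the
vqs_list_lock usage in vring_del_virtqueue above:

static void my_transport_reset(struct virtio_device *vdev)
{
	struct virtqueue *vq;

	spin_lock(&vdev->vqs_list_lock);
	list_for_each_entry(vq, &vdev->vqs, list)
		__virtqueue_break(vq);
	spin_unlock(&vdev->vqs_list_lock);

	/* ... reset and reinitialize the device here ... */

	spin_lock(&vdev->vqs_list_lock);
	list_for_each_entry(vq, &vdev->vqs, list)
		__virtqueue_unbreak(vq);
	spin_unlock(&vdev->vqs_list_lock);
}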
bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index c40f7deb6b5a..9670cc79371d 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -183,6 +183,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
+ vq->num_max = max_num;
+
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 32fd37698932..9295492d24f7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1647,6 +1647,7 @@ config SIEMENS_SIMATIC_IPC_WDT
tristate "Siemens Simatic IPC Watchdog"
depends on SIEMENS_SIMATIC_IPC
select WATCHDOG_CORE
+ select P2SB
help
This driver adds support for several watchdogs found in Industrial
PCs from Siemens.
@@ -1962,6 +1963,14 @@ config MEN_A21_WDT
# PPC64 Architecture
+config PSERIES_WDT
+ tristate "POWER Architecture Platform Watchdog Timer"
+ depends on PPC_PSERIES
+ select WATCHDOG_CORE
+ help
+ Driver for virtual watchdog timers provided by PAPR
+ hypervisors (e.g. PowerVM, KVM).
+
config WATCHDOG_RTAS
tristate "RTAS watchdog"
depends on PPC_RTAS
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c324e9d820e9..cdeb119e6e61 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -187,6 +187,7 @@ obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
# PPC64 Architecture
+obj-$(CONFIG_PSERIES_WDT) += pseries-wdt.o
obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
# S390 Architecture
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 1635f421ef2c..854b1cc723cb 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -274,6 +274,8 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
/* init clock */
dev->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 1ffcf6aca6ae..9388838899ac 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -192,7 +192,6 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int bcm7038_wdt_suspend(struct device *dev)
{
struct bcm7038_watchdog *wdt = dev_get_drvdata(dev);
@@ -212,10 +211,9 @@ static int bcm7038_wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
- bcm7038_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops,
+ bcm7038_wdt_suspend, bcm7038_wdt_resume);
static const struct of_device_id bcm7038_wdt_match[] = {
{ .compatible = "brcm,bcm6345-wdt" },
@@ -236,7 +234,7 @@ static struct platform_driver bcm7038_wdt_driver = {
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
- .pm = &bcm7038_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&bcm7038_wdt_pm_ops),
}
};
module_platform_driver(bcm7038_wdt_driver);
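The conversion above recurs in several watchdog drivers below:
DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr() replaces the
#ifdef CONFIG_PM_SLEEP guards, because pm_sleep_ptr() evaluates to NULL when
CONFIG_PM_SLEEP is disabled, letting the compiler discard the otherwise
unreferenced suspend/resume handlers. The general shape, with hypothetical
foo_* names:

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= pm_sleep_ptr(&foo_pm_ops),
	},
};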
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 5e4dc1a0f2c6..75da5cd02615 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -74,7 +74,7 @@ static unsigned long long period_to_sec(unsigned int period)
/*
* This procedure will find the highest period which will give a timeout
* greater than the one required. e.g. for a bus speed of 66666666 and
- * and a parameter of 2 secs, then this procedure will return a value of 38.
+ * a parameter of 2 secs, then this procedure will return a value of 38.
*/
static unsigned int sec_to_period(unsigned int secs)
{
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index cd578843277e..52962e8d11a6 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -218,7 +218,7 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
/*
* Set the new value in the watchdog. Some versions of dw_wdt
- * have have TOPINIT in the TIMEOUT_RANGE register (as per
+ * have TOPINIT in the TIMEOUT_RANGE register (as per
* CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we
* effectively get a pat of the watchdog right here.
*/
@@ -375,7 +375,6 @@ static irqreturn_t dw_wdt_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
@@ -410,9 +409,8 @@ static int dw_wdt_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
/*
* In case if DW WDT IP core is synthesized with fixed TOP feature disabled the
@@ -710,7 +708,7 @@ static struct platform_driver dw_wdt_driver = {
.driver = {
.name = "dw_wdt",
.of_match_table = of_match_ptr(dw_wdt_of_match),
- .pm = &dw_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&dw_wdt_pm_ops),
},
};
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 7f59c680de25..6a16d3d0bb1e 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -634,7 +634,9 @@ static int __init fintek_wdt_init(void)
pdata.type = ret;
- platform_driver_register(&fintek_wdt_driver);
+ ret = platform_driver_register(&fintek_wdt_driver);
+ if (ret)
+ return ret;
wdt_res.name = "superio port";
wdt_res.flags = IORESOURCE_IO;
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index b76ad6ba0915..33835c0b06de 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -6,7 +6,7 @@
* Copyright (C) 2022 Luca Ceresoli
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/err.h>
@@ -260,5 +260,5 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index f0d4e3cc7459..e97787536792 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -401,7 +401,6 @@ static int mtk_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int mtk_wdt_suspend(struct device *dev)
{
struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
@@ -423,7 +422,6 @@ static int mtk_wdt_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
@@ -437,16 +435,14 @@ static const struct of_device_id mtk_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
-static const struct dev_pm_ops mtk_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mtk_wdt_suspend,
- mtk_wdt_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(mtk_wdt_pm_ops,
+ mtk_wdt_suspend, mtk_wdt_resume);
static struct platform_driver mtk_wdt_driver = {
.probe = mtk_wdt_probe,
.driver = {
.name = DRV_NAME,
- .pm = &mtk_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_wdt_pm_ops),
.of_match_table = mtk_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 9f9a340427fc..c7f745caf203 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -442,7 +442,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
}
}
-/* -- Notifier funtions -----------------------------------------*/
+/* -- Notifier functions -----------------------------------------*/
/**
* pc87413_notify_sys:
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 0937b8d33104..f4bfbffaf49c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -9,6 +9,12 @@
#include <linux/regmap.h>
#include <linux/watchdog.h>
+#define PON_POFF_REASON1 0x0c
+#define PON_POFF_REASON1_PMIC_WD BIT(2)
+#define PON_POFF_REASON2 0x0d
+#define PON_POFF_REASON2_UVLO BIT(5)
+#define PON_POFF_REASON2_OTST3 BIT(6)
+
#define PON_INT_RT_STS 0x10
#define PMIC_WD_BARK_STS_BIT BIT(6)
@@ -58,9 +64,8 @@ static int pm8916_wdt_ping(struct watchdog_device *wdev)
{
struct pm8916_wdt *wdt = watchdog_get_drvdata(wdev);
- return regmap_update_bits(wdt->regmap,
- wdt->baseaddr + PON_PMIC_WD_RESET_PET,
- WATCHDOG_PET_BIT, WATCHDOG_PET_BIT);
+ return regmap_write(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_PET,
+ WATCHDOG_PET_BIT);
}
static int pm8916_wdt_configure_timers(struct watchdog_device *wdev)
@@ -111,12 +116,14 @@ static irqreturn_t pm8916_wdt_isr(int irq, void *arg)
}
static const struct watchdog_info pm8916_wdt_ident = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER,
.identity = "QCOM PM8916 PON WDT",
};
static const struct watchdog_info pm8916_wdt_pt_ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER |
WDIOF_PRETIMEOUT,
.identity = "QCOM PM8916 PON WDT",
};
@@ -135,7 +142,9 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pm8916_wdt *wdt;
struct device *parent;
+ unsigned int val;
int err, irq;
+ u8 poff[2];
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -176,6 +185,30 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->wdev.info = &pm8916_wdt_ident;
}
+ err = regmap_bulk_read(wdt->regmap, wdt->baseaddr + PON_POFF_REASON1,
+ &poff, ARRAY_SIZE(poff));
+ if (err) {
+ dev_err(dev, "failed to read POFF reason: %d\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "POFF reason: %#x %#x\n", poff[0], poff[1]);
+ if (poff[0] & PON_POFF_REASON1_PMIC_WD)
+ wdt->wdev.bootstatus |= WDIOF_CARDRESET;
+ if (poff[1] & PON_POFF_REASON2_UVLO)
+ wdt->wdev.bootstatus |= WDIOF_POWERUNDER;
+ if (poff[1] & PON_POFF_REASON2_OTST3)
+ wdt->wdev.bootstatus |= WDIOF_OVERHEAT;
+
+ err = regmap_read(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL2,
+ &val);
+ if (err) {
+ dev_err(dev, "failed to check if watchdog is active: %d\n", err);
+ return err;
+ }
+ if (val & S2_RESET_EN_BIT)
+ set_bit(WDOG_HW_RUNNING, &wdt->wdev.status);
+
/* Configure watchdog to hard-reset mode */
err = regmap_write(wdt->regmap,
wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL,
diff --git a/drivers/watchdog/pseries-wdt.c b/drivers/watchdog/pseries-wdt.c
new file mode 100644
index 000000000000..7f53b5293409
--- /dev/null
+++ b/drivers/watchdog/pseries-wdt.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 International Business Machines, Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <linux/watchdog.h>
+
+#define DRV_NAME "pseries-wdt"
+
+/*
+ * H_WATCHDOG Input
+ *
+ * R4: "flags":
+ *
+ * Bits 48-55: "operation"
+ */
+#define PSERIES_WDTF_OP_START 0x100UL /* start timer */
+#define PSERIES_WDTF_OP_STOP 0x200UL /* stop timer */
+#define PSERIES_WDTF_OP_QUERY 0x300UL /* query timer capabilities */
+
+/*
+ * Bits 56-63: "timeoutAction" (for "Start Watchdog" only)
+ */
+#define PSERIES_WDTF_ACTION_HARD_POWEROFF 0x1UL /* poweroff */
+#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2UL /* restart */
+#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3UL /* dump + restart */
+
+/*
+ * H_WATCHDOG Output
+ *
+ * R3: Return code
+ *
+ * H_SUCCESS The operation completed.
+ *
+ * H_BUSY The hypervisor is too busy; retry the operation.
+ *
+ * H_PARAMETER The given "flags" are somehow invalid. Either the
+ * "operation" or "timeoutAction" is invalid, or a
+ * reserved bit is set.
+ *
+ * H_P2 The given "watchdogNumber" is zero or exceeds the
+ * supported maximum value.
+ *
+ * H_P3 The given "timeoutInMs" is below the supported
+ * minimum value.
+ *
+ * H_NOOP The given "watchdogNumber" is already stopped.
+ *
+ * H_HARDWARE The operation failed for ineffable reasons.
+ *
+ * H_FUNCTION The H_WATCHDOG hypercall is not supported by this
+ * hypervisor.
+ *
+ * R4:
+ *
+ * - For the "Query Watchdog Capabilities" operation, a 64-bit
+ * structure:
+ */
+#define PSERIES_WDTQ_MIN_TIMEOUT(cap) (((cap) >> 48) & 0xffff)
+#define PSERIES_WDTQ_MAX_NUMBER(cap) (((cap) >> 32) & 0xffff)
+
+static const unsigned long pseries_wdt_action[] = {
+ [0] = PSERIES_WDTF_ACTION_HARD_POWEROFF,
+ [1] = PSERIES_WDTF_ACTION_HARD_RESTART,
+ [2] = PSERIES_WDTF_ACTION_DUMP_RESTART,
+};
+
+#define WATCHDOG_ACTION 1
+static unsigned int action = WATCHDOG_ACTION;
+module_param(action, uint, 0444);
+MODULE_PARM_DESC(action, "Action taken when watchdog expires (default="
+ __MODULE_STRING(WATCHDOG_ACTION) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define WATCHDOG_TIMEOUT 60
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, 0444);
+MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds (default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
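Usage note (illustrative): the parameters above are set at module load time,
e.g. a two-minute timeout with the dump-and-restart action, index 2 in
pseries_wdt_action:

	modprobe pseries-wdt timeout=120 action=2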
+struct pseries_wdt {
+ struct watchdog_device wd;
+ unsigned long action;
+ unsigned long num; /* Watchdog numbers are 1-based */
+};
+
+static int pseries_wdt_start(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ unsigned long flags, msecs;
+ long rc;
+
+ flags = pw->action | PSERIES_WDTF_OP_START;
+ msecs = wdd->timeout * MSEC_PER_SEC;
+ rc = plpar_hcall_norets(H_WATCHDOG, flags, pw->num, msecs);
+ if (rc != H_SUCCESS) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to start timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pseries_wdt_stop(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ long rc;
+
+ rc = plpar_hcall_norets(H_WATCHDOG, PSERIES_WDTF_OP_STOP, pw->num);
+ if (rc != H_SUCCESS && rc != H_NOOP) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to stop timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static struct watchdog_info pseries_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT
+ | WDIOF_PRETIMEOUT,
+};
+
+static const struct watchdog_ops pseries_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = pseries_wdt_start,
+ .stop = pseries_wdt_stop,
+};
+
+static int pseries_wdt_probe(struct platform_device *pdev)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct pseries_wdt *pw;
+ unsigned long cap;
+ long msecs, rc;
+ int err;
+
+ rc = plpar_hcall(H_WATCHDOG, ret, PSERIES_WDTF_OP_QUERY);
+ if (rc == H_FUNCTION)
+ return -ENODEV;
+ if (rc != H_SUCCESS)
+ return -EIO;
+ cap = ret[0];
+
+ pw = devm_kzalloc(&pdev->dev, sizeof(*pw), GFP_KERNEL);
+ if (!pw)
+ return -ENOMEM;
+
+ /*
+ * Assume watchdogNumber 1 for now. If we ever support
+ * multiple timers we will need to devise a way to choose a
+ * distinct watchdogNumber for each platform device at device
+ * registration time.
+ */
+ pw->num = 1;
+ if (PSERIES_WDTQ_MAX_NUMBER(cap) < pw->num)
+ return -ENODEV;
+
+ if (action >= ARRAY_SIZE(pseries_wdt_action))
+ return -EINVAL;
+ pw->action = pseries_wdt_action[action];
+
+ pw->wd.parent = &pdev->dev;
+ pw->wd.info = &pseries_wdt_info;
+ pw->wd.ops = &pseries_wdt_ops;
+ msecs = PSERIES_WDTQ_MIN_TIMEOUT(cap);
+ pw->wd.min_timeout = DIV_ROUND_UP(msecs, MSEC_PER_SEC);
+ pw->wd.max_timeout = UINT_MAX / 1000; /* from linux/watchdog.h */
+ pw->wd.timeout = timeout;
+ if (watchdog_init_timeout(&pw->wd, 0, NULL))
+ return -EINVAL;
+ watchdog_set_nowayout(&pw->wd, nowayout);
+ watchdog_stop_on_reboot(&pw->wd);
+ watchdog_stop_on_unregister(&pw->wd);
+ watchdog_set_drvdata(&pw->wd, pw);
+
+ err = devm_watchdog_register_device(&pdev->dev, &pw->wd);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, &pw->wd);
+
+ return 0;
+}
+
+static int pseries_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_stop(wd);
+ return 0;
+}
+
+static int pseries_wdt_resume(struct platform_device *pdev)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_start(wd);
+ return 0;
+}
+
+static const struct platform_device_id pseries_wdt_id[] = {
+ { .name = "pseries-wdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, pseries_wdt_id);
+
+static struct platform_driver pseries_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = pseries_wdt_id,
+ .probe = pseries_wdt_probe,
+ .resume = pseries_wdt_resume,
+ .suspend = pseries_wdt_suspend,
+};
+module_platform_driver(pseries_wdt_driver);
+
+MODULE_AUTHOR("Alexey Kardashevskiy");
+MODULE_AUTHOR("Scott Cheloha");
+MODULE_DESCRIPTION("POWER Architecture Platform Watchdog Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c
index 60058a0c3ec4..2a5298c5e8e4 100644
--- a/drivers/watchdog/realtek_otto_wdt.c
+++ b/drivers/watchdog/realtek_otto_wdt.c
@@ -366,6 +366,7 @@ static const struct of_device_id otto_wdt_ids[] = {
{ .compatible = "realtek,rtl8380-wdt" },
{ .compatible = "realtek,rtl8390-wdt" },
{ .compatible = "realtek,rtl9300-wdt" },
+ { .compatible = "realtek,rtl9310-wdt" },
{ }
};
MODULE_DEVICE_TABLE(of, otto_wdt_ids);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 6db22f2e3a4f..95919392927f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -845,8 +845,6 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
s3c2410wdt_stop(&wdt->wdt_device);
}
-#ifdef CONFIG_PM_SLEEP
-
static int s3c2410wdt_suspend(struct device *dev)
{
int ret;
@@ -885,10 +883,9 @@ static int s3c2410wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
- s3c2410wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops,
+ s3c2410wdt_suspend, s3c2410wdt_resume);
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
@@ -897,7 +894,7 @@ static struct platform_driver s3c2410wdt_driver = {
.id_table = s3c2410_wdt_ids,
.driver = {
.name = "s3c2410-wdt",
- .pm = &s3c2410wdt_pm_ops,
+ .pm = pm_sleep_ptr(&s3c2410wdt_pm_ops),
.of_match_table = of_match_ptr(s3c2410_wdt_match),
},
};
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index ec20ad4e534f..aeee934ca51b 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -339,7 +339,6 @@ static const struct of_device_id sama5d4_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
-#ifdef CONFIG_PM_SLEEP
static int sama5d4_wdt_suspend_late(struct device *dev)
{
struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
@@ -366,18 +365,17 @@ static int sama5d4_wdt_resume_early(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sama5d4_wdt_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
- sama5d4_wdt_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
+ sama5d4_wdt_resume_early)
};
static struct platform_driver sama5d4_wdt_driver = {
.probe = sama5d4_wdt_probe,
.driver = {
.name = "sama5d4_wdt",
- .pm = &sama5d4_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&sama5d4_wdt_pm_ops),
.of_match_table = sama5d4_wdt_of_match,
}
};
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
index 8bac793c63fb..6599695dc672 100644
--- a/drivers/watchdog/simatic-ipc-wdt.c
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_data/x86/p2sb.h>
#include <linux/platform_data/x86/simatic-ipc-base.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
@@ -54,9 +55,9 @@ static struct resource io_resource_trigger =
DEFINE_RES_IO_NAMED(WD_TRIGGER_IOADR, SZ_1,
KBUILD_MODNAME " WD_TRIGGER_IOADR");
-/* the actual start will be discovered with pci, 0 is a placeholder */
+/* the actual start will be discovered with p2sb, 0 is a placeholder */
static struct resource mem_resource =
- DEFINE_RES_MEM_NAMED(0, SZ_4, "WD_RESET_BASE_ADR");
+ DEFINE_RES_MEM_NAMED(0, 0, "WD_RESET_BASE_ADR");
static u32 wd_timeout_table[] = {2, 4, 6, 8, 16, 32, 48, 64 };
static void __iomem *wd_reset_base_addr;
@@ -150,6 +151,7 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
struct simatic_ipc_platform *plat = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct resource *res;
+ int ret;
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227E:
@@ -190,15 +192,14 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
if (plat->devmode == SIMATIC_IPC_DEVICE_427E) {
res = &mem_resource;
- /* get GPIO base from PCI */
- res->start = simatic_ipc_get_membase0(PCI_DEVFN(0x1f, 1));
- if (res->start == 0)
- return -ENODEV;
+ ret = p2sb_bar(NULL, 0, res);
+ if (ret)
+ return ret;
/* do the final address calculation */
res->start = res->start + (GPIO_COMMUNITY0_PORT_ID << 16) +
PAD_CFG_DW0_GPP_A_23;
- res->end += res->start;
+ res->end = res->start + SZ_4 - 1;
wd_reset_base_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(wd_reset_base_addr))
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 86ffb58fbc85..ae54dd33e233 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -402,6 +402,7 @@ out:
iounmap(addr);
release_resource(res);
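+	/* release_resource() detaches the resource but does not free it. */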
+ kfree(res);
return ret;
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index f9479a3fe2a6..78ba36689eec 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/char/watchdog/sp805-wdt.c
*
@@ -341,6 +342,10 @@ static const struct amba_id sp805_wdt_ids[] = {
.id = 0x00141805,
.mask = 0x00ffffff,
},
+ {
+ .id = 0x001bb824,
+ .mask = 0x00ffffff,
+ },
{ 0, 0 },
};
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 14ab6559c748..39abecdb9dd1 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -248,7 +248,6 @@ static int st_wdog_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_wdog_suspend(struct device *dev)
{
struct st_wdog *st_wdog = watchdog_get_drvdata(&st_wdog_dev);
@@ -285,16 +284,14 @@ static int st_wdog_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
- st_wdog_suspend,
- st_wdog_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
+ st_wdog_suspend, st_wdog_resume);
static struct platform_driver st_wdog_driver = {
.driver = {
.name = "st-lpc-wdt",
- .pm = &st_wdog_pm_ops,
+ .pm = pm_sleep_ptr(&st_wdog_pm_ops),
.of_match_table = st_wdog_match,
},
.probe = st_wdog_probe,
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index dfe06e506cad..d5de6c0657a5 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -230,8 +230,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_wdt_runtime_suspend(struct device *dev)
+static int tegra_wdt_suspend(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -241,7 +240,7 @@ static int tegra_wdt_runtime_suspend(struct device *dev)
return 0;
}
-static int tegra_wdt_runtime_resume(struct device *dev)
+static int tegra_wdt_resume(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -250,7 +249,6 @@ static int tegra_wdt_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id tegra_wdt_of_match[] = {
{ .compatible = "nvidia,tegra30-timer", },
@@ -258,16 +256,14 @@ static const struct of_device_id tegra_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_wdt_of_match);
-static const struct dev_pm_ops tegra_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_wdt_runtime_suspend,
- tegra_wdt_runtime_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(tegra_wdt_pm_ops,
+ tegra_wdt_suspend, tegra_wdt_resume);
static struct platform_driver tegra_wdt_driver = {
.probe = tegra_wdt_probe,
.driver = {
.name = "tegra-wdt",
- .pm = &tegra_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&tegra_wdt_pm_ops),
.of_match_table = tegra_wdt_of_match,
},
};
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e6f95e99156d..aeadaa07c891 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -467,7 +467,6 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, &wdat->wdd);
}
-#ifdef CONFIG_PM_SLEEP
static int wdat_wdt_suspend_noirq(struct device *dev)
{
struct wdat_wdt *wdat = dev_get_drvdata(dev);
@@ -528,18 +527,16 @@ static int wdat_wdt_resume_noirq(struct device *dev)
return wdat_wdt_start(&wdat->wdd);
}
-#endif
static const struct dev_pm_ops wdat_wdt_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq,
- wdat_wdt_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq, wdat_wdt_resume_noirq)
};
static struct platform_driver wdat_wdt_driver = {
.probe = wdat_wdt_probe,
.driver = {
.name = "wdat_wdt",
- .pm = &wdat_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&wdat_wdt_pm_ops),
},
};
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 5e8321f43cbd..c443f04aaad7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -45,6 +45,7 @@
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
+#include <asm/xen/cpuid.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
@@ -2184,6 +2185,7 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
+#ifdef CONFIG_X86
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
@@ -2196,11 +2198,48 @@ void xen_setup_callback_vector(void)
callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
if (xen_set_callback_via(callback_via)) {
pr_err("Request for Xen HVM callback vector failed\n");
- xen_have_vector_callback = 0;
+ xen_have_vector_callback = false;
}
}
}
+/*
+ * Set up per-vCPU vector-type callbacks. If this setup is unavailable,
+ * fall back to the global vector-type callback.
+ */
+static __init void xen_init_setup_upcall_vector(void)
+{
+ if (!xen_have_vector_callback)
+ return;
+
+ if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
+ !xen_set_upcall_vector(0))
+ xen_percpu_upcall = true;
+ else if (xen_feature(XENFEAT_hvm_callback_vector))
+ xen_setup_callback_vector();
+ else
+ xen_have_vector_callback = false;
+}
+
+int xen_set_upcall_vector(unsigned int cpu)
+{
+ int rc;
+ xen_hvm_evtchn_upcall_vector_t op = {
+ .vector = HYPERVISOR_CALLBACK_VECTOR,
+ .vcpu = per_cpu(xen_vcpu_id, cpu),
+ };
+
+ rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
+ if (rc)
+ return rc;
+
+ /* Trick toolstack to think we are enlightened. */
+ if (!cpu)
+ rc = xen_set_callback_via(1);
+
+ return rc;
+}
+
static __init void xen_alloc_callback_vector(void)
{
if (!xen_have_vector_callback)
@@ -2211,8 +2250,11 @@ static __init void xen_alloc_callback_vector(void)
}
#else
void xen_setup_callback_vector(void) {}
+static inline void xen_init_setup_upcall_vector(void) {}
+int xen_set_upcall_vector(unsigned int cpu) { return 0; }
static inline void xen_alloc_callback_vector(void) {}
-#endif
+#endif /* CONFIG_XEN_PVHVM */
+#endif /* CONFIG_X86 */
bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);
@@ -2272,10 +2314,9 @@ void __init xen_init_IRQ(void)
if (xen_initial_domain())
pci_xen_initial_domain();
}
- if (xen_feature(XENFEAT_hvm_callback_vector)) {
- xen_setup_callback_vector();
- xen_alloc_callback_vector();
- }
+ xen_init_setup_upcall_vector();
+ xen_alloc_callback_vector();
+
if (xen_hvm_domain()) {
native_init_IRQ();
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 3369734108af..e88e8f6f0a33 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -581,27 +581,30 @@ static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
- unsigned int i;
+ unsigned int i, off = 0;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < num; ) {
unsigned int requested;
int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
- PAGE_SIZE);
+ PAGE_SIZE) - off;
if (requested > nr_pages)
return -ENOSPC;
page_count = pin_user_pages_fast(
- (unsigned long) kbufs[i].uptr,
+ (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
requested, FOLL_WRITE, pages);
- if (page_count < 0)
- return page_count;
+ if (page_count <= 0)
+ return page_count ? : -EFAULT;
*pinned += page_count;
nr_pages -= page_count;
pages += page_count;
+
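+		/*
+		 * If the pin stopped short, stay on the same buffer and
+		 * resume at the first unpinned page; otherwise clear the
+		 * offset and advance to the next buffer.
+		 */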
+ off = (requested == page_count) ? 0 : off + page_count;
+ i += !off;
}
return 0;
@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
}
rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
- if (rc < 0) {
- nr_pages = pinned;
+ if (rc < 0)
goto out;
- }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
xen_preemptible_hcall_end();
out:
- unlock_pages(pages, nr_pages);
+ unlock_pages(pages, pinned);
kfree(xbufs);
kfree(pages);
kfree(kbufs);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 3fbc21466a93..84e014490950 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -159,7 +159,7 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
return XEN_PCI_ERR_op_failed;
}
- /* The value the guest needs is actually the IDT vector, not the
+ /* The value the guest needs is actually the IDT vector, not
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7a0c93acc2c5..d3dcda344989 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1121,7 +1121,7 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
"%s: writing %s", __func__, state);
return;
}
- strlcpy(phy, val, VSCSI_NAMELEN);
+ strscpy(phy, val, VSCSI_NAMELEN);
kfree(val);
/* virtual SCSI device */
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 597af455a522..0792fda49a15 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp,
{
struct xenbus_file_priv *u = filp->private_data;
struct read_buffer *rb;
- unsigned i;
+ ssize_t i;
int ret;
mutex_lock(&u->reply_mutex);
@@ -148,7 +148,7 @@ again:
rb = list_entry(u->read_buffers.next, struct read_buffer, list);
i = 0;
while (i < len) {
- unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+ size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 5abded97e1a7..9c09f89d8278 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -305,7 +305,7 @@ static int __init xenbus_probe_backend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- if (register_shrinker(&backend_memory_shrinker))
+ if (register_shrinker(&backend_memory_shrinker, "xen-backend"))
pr_warn("shrinker registration failed\n");
return 0;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 07b010a68fcf..f44d5a64351e 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -40,7 +40,7 @@ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
return -EINVAL;
}
- strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+ strscpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
pr_warn("bus_id %s no slash\n", bus_id);
return -EINVAL;