Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/apei/ghes.c | 2
-rw-r--r-- drivers/acpi/device_pm.c | 15
-rw-r--r-- drivers/acpi/irq.c | 8
-rw-r--r-- drivers/acpi/resource.c | 16
-rw-r--r-- drivers/acpi/sleep.c | 8
-rw-r--r-- drivers/acpi/thermal.c | 211
-rw-r--r-- drivers/acpi/viot.c | 1
-rw-r--r-- drivers/acpi/x86/s2idle.c | 12
-rw-r--r-- drivers/android/binder.c | 4
-rw-r--r-- drivers/base/memory.c | 6
-rw-r--r-- drivers/base/node.c | 141
-rw-r--r-- drivers/base/platform-msi.c | 1
-rw-r--r-- drivers/base/power/domain.c | 4
-rw-r--r-- drivers/bcma/driver_gpio.c | 2
-rw-r--r-- drivers/block/virtio_blk.c | 110
-rw-r--r-- drivers/block/zram/zram_drv.c | 38
-rw-r--r-- drivers/block/zram/zram_drv.h | 15
-rw-r--r-- drivers/char/hw_random/arm_smccc_trng.c | 4
-rw-r--r-- drivers/char/hw_random/core.c | 55
-rw-r--r-- drivers/char/hw_random/imx-rngc.c | 51
-rw-r--r-- drivers/char/ipmi/Kconfig | 6
-rw-r--r-- drivers/char/ipmi/ipmi_ipmb.c | 16
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 8
-rw-r--r-- drivers/char/ipmi/ipmi_ssif.c | 4
-rw-r--r-- drivers/char/ipmi/kcs_bmc_aspeed.c | 29
-rw-r--r-- drivers/char/ipmi/kcs_bmc_cdev_ipmi.c | 4
-rw-r--r-- drivers/char/ipmi/kcs_bmc_serio.c | 4
-rw-r--r-- drivers/char/mem.c | 4
-rw-r--r-- drivers/char/random.c | 139
-rw-r--r-- drivers/char/tpm/tpm_ppi.c | 2
-rw-r--r-- drivers/clocksource/Kconfig | 2
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 6
-rw-r--r-- drivers/clocksource/exynos_mct.c | 83
-rw-r--r-- drivers/clocksource/renesas-ostm.c | 2
-rw-r--r-- drivers/clocksource/timer-gxp.c | 7
-rw-r--r-- drivers/clocksource/timer-imx-sysctr.c | 6
-rw-r--r-- drivers/clocksource/timer-sun4i.c | 3
-rw-r--r-- drivers/clocksource/timer-ti-dm.c | 681
-rw-r--r-- drivers/crypto/Kconfig | 3
-rw-r--r-- drivers/crypto/Makefile | 1
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 16
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 6
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 6
-rw-r--r-- drivers/crypto/aspeed/Kconfig | 48
-rw-r--r-- drivers/crypto/aspeed/Makefile | 7
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace-crypto.c | 1133
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace-hash.c | 1391
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace.c | 284
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace.h | 298
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 6
-rw-r--r-- drivers/crypto/bcm/cipher.c | 4
-rw-r--r-- drivers/crypto/bcm/cipher.h | 2
-rw-r--r-- drivers/crypto/cavium/cpt/cpt_hw_types.h | 2
-rw-r--r-- drivers/crypto/cavium/cpt/cptpf_main.c | 6
-rw-r--r-- drivers/crypto/cavium/zip/zip_crypto.c | 30
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-des3.c | 5
-rw-r--r-- drivers/crypto/ccp/ccp-dmaengine.c | 6
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 78
-rw-r--r-- drivers/crypto/ccree/cc_buffer_mgr.c | 2
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 8
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 250
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 216
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 906
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 34
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 456
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 160
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 3
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_crypto.c | 134
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 266
-rw-r--r-- drivers/crypto/inside-secure/safexcel_cipher.c | 60
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 67
-rw-r--r-- drivers/crypto/keembay/Kconfig | 4
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h | 2
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 24
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 8
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c | 20
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 4
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 5
-rw-r--r-- drivers/crypto/n2_core.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-ccm.c | 5
-rw-r--r-- drivers/crypto/qat/qat_common/adf_cfg.c | 6
-rw-r--r-- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 10
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen4_hw_data.h | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_transport_debug.c | 2
-rw-r--r-- drivers/crypto/qat/qat_common/icp_qat_uclo.h | 3
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 18
-rw-r--r-- drivers/crypto/qat/qat_common/qat_asym_algs.c | 24
-rw-r--r-- drivers/crypto/qat/qat_common/qat_uclo.c | 56
-rw-r--r-- drivers/crypto/qce/aead.c | 4
-rw-r--r-- drivers/crypto/qce/sha.c | 8
-rw-r--r-- drivers/crypto/qce/skcipher.c | 8
-rw-r--r-- drivers/crypto/qcom-rng.c | 7
-rw-r--r-- drivers/crypto/sahara.c | 22
-rw-r--r-- drivers/dax/kmem.c | 42
-rw-r--r-- drivers/firmware/dmi_scan.c | 10
-rw-r--r-- drivers/firmware/efi/efi.c | 2
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 1
-rw-r--r-- drivers/gpio/Kconfig | 14
-rw-r--r-- drivers/gpio/Makefile | 1
-rw-r--r-- drivers/gpio/gpio-adp5588.c | 446
-rw-r--r-- drivers/gpio/gpio-mlxbf2.c | 6
-rw-r--r-- drivers/gpio/gpio-rockchip.c | 7
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 15
-rw-r--r-- drivers/gpio/gpiolib-acpi.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc21.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 165
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 78
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 63
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 73
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 85
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 105
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 70
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 147
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 239
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 66
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c | 57
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 88
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 118
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 96
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 131
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h | 99
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 140
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dma.c | 1
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_proto.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.c | 62
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 78
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 46
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 205
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 212
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 16
-rw-r--r-- drivers/hid/hid-debug.c | 3
-rw-r--r-- drivers/hv/connection.c | 33
-rw-r--r-- drivers/hv/vmbus_drv.c | 20
-rw-r--r-- drivers/i2c/busses/i2c-aspeed.c | 9
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.h | 7
-rw-r--r-- drivers/i2c/busses/i2c-designware-master.c | 13
-rw-r--r-- drivers/i2c/busses/i2c-mchp-pci1xxxx.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-qcom-cci.c | 1
-rw-r--r-- drivers/i2c/i2c-core-acpi.c | 40
-rw-r--r-- drivers/i2c/i2c-core-base.c | 6
-rw-r--r-- drivers/i2c/i2c-core.h | 4
-rw-r--r-- drivers/input/ff-core.c | 3
-rw-r--r-- drivers/input/ff-memless.c | 3
-rw-r--r-- drivers/input/gameport/emu10k1-gp.c | 3
-rw-r--r-- drivers/input/gameport/lightning.c | 3
-rw-r--r-- drivers/input/gameport/ns558.c | 3
-rw-r--r-- drivers/input/joydev.c | 2
-rw-r--r-- drivers/input/joystick/a3d.c | 3
-rw-r--r-- drivers/input/joystick/adc-joystick.c | 65
-rw-r--r-- drivers/input/joystick/adi.c | 3
-rw-r--r-- drivers/input/joystick/amijoy.c | 3
-rw-r--r-- drivers/input/joystick/analog.c | 3
-rw-r--r-- drivers/input/joystick/cobra.c | 3
-rw-r--r-- drivers/input/joystick/db9.c | 3
-rw-r--r-- drivers/input/joystick/gamecon.c | 3
-rw-r--r-- drivers/input/joystick/gf2k.c | 3
-rw-r--r-- drivers/input/joystick/grip.c | 3
-rw-r--r-- drivers/input/joystick/guillemot.c | 3
-rw-r--r-- drivers/input/joystick/interact.c | 3
-rw-r--r-- drivers/input/joystick/joydump.c | 3
-rw-r--r-- drivers/input/joystick/magellan.c | 3
-rw-r--r-- drivers/input/joystick/sidewinder.c | 3
-rw-r--r-- drivers/input/joystick/spaceball.c | 3
-rw-r--r-- drivers/input/joystick/spaceorb.c | 3
-rw-r--r-- drivers/input/joystick/stinger.c | 3
-rw-r--r-- drivers/input/joystick/tmdc.c | 3
-rw-r--r-- drivers/input/joystick/turbografx.c | 3
-rw-r--r-- drivers/input/joystick/twidjoy.c | 3
-rw-r--r-- drivers/input/joystick/warrior.c | 3
-rw-r--r-- drivers/input/joystick/xpad.c | 455
-rw-r--r-- drivers/input/joystick/zhenhua.c | 3
-rw-r--r-- drivers/input/keyboard/Kconfig | 18
-rw-r--r-- drivers/input/keyboard/Makefile | 1
-rw-r--r-- drivers/input/keyboard/adp5588-keys.c | 726
-rw-r--r-- drivers/input/keyboard/amikbd.c | 3
-rw-r--r-- drivers/input/keyboard/applespi.c | 4
-rw-r--r-- drivers/input/keyboard/atakbd.c | 3
-rw-r--r-- drivers/input/keyboard/atkbd.c | 16
-rw-r--r-- drivers/input/keyboard/clps711x-keypad.c | 13
-rw-r--r-- drivers/input/keyboard/ep93xx_keypad.c | 1
-rw-r--r-- drivers/input/keyboard/imx_keypad.c | 1
-rw-r--r-- drivers/input/keyboard/lkkbd.c | 11
-rw-r--r-- drivers/input/keyboard/lm8333.c | 8
-rw-r--r-- drivers/input/keyboard/matrix_keypad.c | 7
-rw-r--r-- drivers/input/keyboard/mt6779-keypad.c | 46
-rw-r--r-- drivers/input/keyboard/mtk-pmic-keys.c | 21
-rw-r--r-- drivers/input/keyboard/newtonkbd.c | 3
-rw-r--r-- drivers/input/keyboard/pinephone-keyboard.c | 468
-rw-r--r-- drivers/input/keyboard/st-keyscan.c | 10
-rw-r--r-- drivers/input/keyboard/stowaway.c | 3
-rw-r--r-- drivers/input/keyboard/sunkbd.c | 3
-rw-r--r-- drivers/input/keyboard/tc3589x-keypad.c | 2
-rw-r--r-- drivers/input/keyboard/xtkbd.c | 3
-rw-r--r-- drivers/input/misc/Kconfig | 27
-rw-r--r-- drivers/input/misc/Makefile | 2
-rw-r--r-- drivers/input/misc/ibm-panel.c | 200
-rw-r--r-- drivers/input/misc/ims-pcu.c | 2
-rw-r--r-- drivers/input/misc/iqs7222.c | 16
-rw-r--r-- drivers/input/misc/keyspan_remote.c | 2
-rw-r--r-- drivers/input/misc/rt5120-pwrkey.c | 120
-rw-r--r-- drivers/input/misc/twl4030-pwrbutton.c | 1
-rw-r--r-- drivers/input/misc/twl4030-vibra.c | 13
-rw-r--r-- drivers/input/mouse/elan_i2c_core.c | 7
-rw-r--r-- drivers/input/mouse/hgpk.c | 4
-rw-r--r-- drivers/input/mouse/inport.c | 3
-rw-r--r-- drivers/input/mouse/logibm.c | 3
-rw-r--r-- drivers/input/mouse/pc110pad.c | 3
-rw-r--r-- drivers/input/mouse/psmouse-base.c | 22
-rw-r--r-- drivers/input/mouse/sermouse.c | 3
-rw-r--r-- drivers/input/mouse/synaptics.c | 5
-rw-r--r-- drivers/input/mouse/synaptics_usb.c | 2
-rw-r--r-- drivers/input/mouse/vsxxxaa.c | 7
-rw-r--r-- drivers/input/rmi4/rmi_f03.c | 2
-rw-r--r-- drivers/input/rmi4/rmi_f34.c | 32
-rw-r--r-- drivers/input/rmi4/rmi_f34.h | 17
-rw-r--r-- drivers/input/rmi4/rmi_f34v7.c | 349
-rw-r--r-- drivers/input/rmi4/rmi_f54.c | 8
-rw-r--r-- drivers/input/serio/altera_ps2.c | 4
-rw-r--r-- drivers/input/serio/ambakmi.c | 4
-rw-r--r-- drivers/input/serio/ams_delta_serio.c | 4
-rw-r--r-- drivers/input/serio/apbps2.c | 2
-rw-r--r-- drivers/input/serio/ct82c710.c | 5
-rw-r--r-- drivers/input/serio/gscps2.c | 2
-rw-r--r-- drivers/input/serio/hyperv-keyboard.c | 4
-rw-r--r-- drivers/input/serio/i8042-acpipnpio.h (renamed from drivers/input/serio/i8042-x86ia64io.h) | 18
-rw-r--r-- drivers/input/serio/i8042-sparcio.h | 27
-rw-r--r-- drivers/input/serio/i8042.c | 14
-rw-r--r-- drivers/input/serio/i8042.h | 4
-rw-r--r-- drivers/input/serio/libps2.c | 5
-rw-r--r-- drivers/input/serio/olpc_apsp.c | 8
-rw-r--r-- drivers/input/serio/parkbd.c | 2
-rw-r--r-- drivers/input/serio/pcips2.c | 4
-rw-r--r-- drivers/input/serio/ps2-gpio.c | 4
-rw-r--r-- drivers/input/serio/ps2mult.c | 2
-rw-r--r-- drivers/input/serio/q40kbd.c | 7
-rw-r--r-- drivers/input/serio/rpckbd.c | 7
-rw-r--r-- drivers/input/serio/sa1111ps2.c | 4
-rw-r--r-- drivers/input/serio/serio.c | 3
-rw-r--r-- drivers/input/serio/serport.c | 2
-rw-r--r-- drivers/input/serio/sun4i-ps2.c | 4
-rw-r--r-- drivers/input/tablet/acecad.c | 5
-rw-r--r-- drivers/input/tablet/aiptek.c | 20
-rw-r--r-- drivers/input/tablet/hanwang.c | 5
-rw-r--r-- drivers/input/tablet/pegasus_notetaker.c | 2
-rw-r--r-- drivers/input/touchscreen/Kconfig | 2
-rw-r--r-- drivers/input/touchscreen/atmel_mxt_ts.c | 8
-rw-r--r-- drivers/input/touchscreen/auo-pixcir-ts.c | 155
-rw-r--r-- drivers/input/touchscreen/chipone_icn8505.c | 30
-rw-r--r-- drivers/input/touchscreen/edt-ft5x06.c | 12
-rw-r--r-- drivers/input/touchscreen/gunze.c | 3
-rw-r--r-- drivers/input/touchscreen/sur40.c | 6
-rw-r--r-- drivers/input/touchscreen/usbtouchscreen.c | 2
-rw-r--r-- drivers/input/touchscreen/wacom_w8001.c | 6
-rw-r--r-- drivers/iommu/Kconfig | 16
-rw-r--r-- drivers/iommu/Makefile | 1
-rw-r--r-- drivers/iommu/amd/Kconfig | 1
-rw-r--r-- drivers/iommu/amd/Makefile | 2
-rw-r--r-- drivers/iommu/amd/amd_iommu.h | 1
-rw-r--r-- drivers/iommu/amd/amd_iommu_types.h | 10
-rw-r--r-- drivers/iommu/amd/init.c | 47
-rw-r--r-- drivers/iommu/amd/io_pgtable.c | 76
-rw-r--r-- drivers/iommu/amd/io_pgtable_v2.c | 415
-rw-r--r-- drivers/iommu/amd/iommu.c | 164
-rw-r--r-- drivers/iommu/apple-dart.c | 57
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 62
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu.c | 97
-rw-r--r-- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 6
-rw-r--r-- drivers/iommu/dma-iommu.c | 18
-rw-r--r-- drivers/iommu/dma-iommu.h | 42
-rw-r--r-- drivers/iommu/exynos-iommu.c | 9
-rw-r--r-- drivers/iommu/fsl_pamu_domain.c | 6
-rw-r--r-- drivers/iommu/intel/Kconfig | 6
-rw-r--r-- drivers/iommu/intel/cap_audit.c | 4
-rw-r--r-- drivers/iommu/intel/iommu.c | 95
-rw-r--r-- drivers/iommu/intel/iommu.h | 7
-rw-r--r-- drivers/iommu/intel/irq_remapping.c | 6
-rw-r--r-- drivers/iommu/intel/pasid.c | 12
-rw-r--r-- drivers/iommu/intel/svm.c | 62
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 71
-rw-r--r-- drivers/iommu/io-pgtable-dart.c | 469
-rw-r--r-- drivers/iommu/io-pgtable.c | 4
-rw-r--r-- drivers/iommu/iommu.c | 154
-rw-r--r-- drivers/iommu/iova.c | 13
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 35
-rw-r--r-- drivers/iommu/msm_iommu.c | 2
-rw-r--r-- drivers/iommu/mtk_iommu.c | 45
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 13
-rw-r--r-- drivers/iommu/omap-iommu-debug.c | 6
-rw-r--r-- drivers/iommu/omap-iommu.c | 6
-rw-r--r-- drivers/iommu/rockchip-iommu.c | 2
-rw-r--r-- drivers/iommu/s390-iommu.c | 15
-rw-r--r-- drivers/iommu/sprd-iommu.c | 5
-rw-r--r-- drivers/iommu/sun50i-iommu.c | 2
-rw-r--r-- drivers/iommu/tegra-smmu.c | 29
-rw-r--r-- drivers/iommu/virtio-iommu.c | 32
-rw-r--r-- drivers/irqchip/Kconfig | 17
-rw-r--r-- drivers/irqchip/Makefile | 1
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-mbi.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r-- drivers/irqchip/irq-imx-mu-msi.c | 453
-rw-r--r-- drivers/irqchip/irq-ls-extirq.c | 87
-rw-r--r-- drivers/irqchip/irq-ls-scfg-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-realtek-rtl.c | 134
-rw-r--r-- drivers/isdn/hardware/mISDN/hfcpci.c | 3
-rw-r--r-- drivers/macintosh/therm_windtunnel.c | 6
-rw-r--r-- drivers/media/pci/pt3/pt3.c | 4
-rw-r--r-- drivers/misc/cxl/fault.c | 45
-rw-r--r-- drivers/mmc/core/block.c | 6
-rw-r--r-- drivers/mmc/core/card.h | 6
-rw-r--r-- drivers/mmc/core/quirks.h | 6
-rw-r--r-- drivers/mmc/host/renesas_sdhi_core.c | 21
-rw-r--r-- drivers/mmc/host/sdhci-sprd.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 2
-rw-r--r-- drivers/net/Kconfig | 3
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 2
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 3
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 2
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 79
-rw-r--r-- drivers/net/ethernet/adi/adin1110.c | 13
-rw-r--r-- drivers/net/ethernet/broadcom/Makefile | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_matchall.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router_hw.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_span.c | 5
-rw-r--r-- drivers/net/ethernet/mediatek/Makefile | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 4
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 4
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 5
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r-- drivers/net/hyperv/netvsc.c | 4
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 19
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/phy/micrel.c | 9
-rw-r--r-- drivers/net/phy/sfp-bus.c | 3
-rw-r--r-- drivers/net/pse-pd/Kconfig | 1
-rw-r--r-- drivers/net/virtio_net.c | 48
-rw-r--r-- drivers/net/wireless/ath/ath11k/mac.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/rng.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 2
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 5
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/tx.c | 10
-rw-r--r-- drivers/nvdimm/nd.h | 2
-rw-r--r-- drivers/nvdimm/pfn_devs.c | 2
-rw-r--r-- drivers/nvme/host/multipath.c | 1
-rw-r--r-- drivers/nvme/host/pci.c | 4
-rw-r--r-- drivers/nvme/host/rdma.c | 2
-rw-r--r-- drivers/nvme/host/tcp.c | 2
-rw-r--r-- drivers/of/address.c | 4
-rw-r--r-- drivers/of/base.c | 7
-rw-r--r-- drivers/of/device.c | 9
-rw-r--r-- drivers/of/fdt.c | 17
-rw-r--r-- drivers/of/irq.c | 3
-rw-r--r-- drivers/of/of_private.h | 5
-rw-r--r-- drivers/of/unittest.c | 5
-rw-r--r-- drivers/pci/controller/dwc/pci-imx6.c | 33
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 28
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-kirin.c | 4
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-ep.c | 164
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 128
-rw-r--r-- drivers/pci/controller/pci-aardvark.c | 4
-rw-r--r-- drivers/pci/controller/pci-ftpci100.c | 21
-rw-r--r-- drivers/pci/controller/pci-mvebu.c | 13
-rw-r--r-- drivers/pci/controller/pci-tegra.c | 11
-rw-r--r-- drivers/pci/controller/pcie-apple.c | 4
-rw-r--r-- drivers/pci/controller/pcie-mediatek-gen3.c | 2
-rw-r--r-- drivers/pci/controller/pcie-mt7621.c | 17
-rw-r--r-- drivers/pci/msi/msi.c | 2
-rw-r--r-- drivers/pci/p2pdma.c | 2
-rw-r--r-- drivers/pci/pci-bridge-emul.c | 48
-rw-r--r-- drivers/pci/pci-bridge-emul.h | 2
-rw-r--r-- drivers/pci/pci-driver.c | 30
-rw-r--r-- drivers/pci/pci-sysfs.c | 108
-rw-r--r-- drivers/pci/pci.c | 51
-rw-r--r-- drivers/pci/pci.h | 63
-rw-r--r-- drivers/pci/pcie/aspm.c | 258
-rw-r--r-- drivers/pci/pcie/dpc.c | 15
-rw-r--r-- drivers/pci/pcie/ptm.c | 300
-rw-r--r-- drivers/pci/probe.c | 13
-rw-r--r-- drivers/pci/quirks.c | 36
-rw-r--r-- drivers/pci/setup-bus.c | 290
-rw-r--r-- drivers/pci/setup-res.c | 11
-rw-r--r-- drivers/pci/xen-pcifront.c | 161
-rw-r--r-- drivers/perf/arm_spe_pmu.c | 4
-rw-r--r-- drivers/phy/freescale/phy-fsl-imx8m-pcie.c | 8
-rw-r--r-- drivers/pinctrl/Kconfig | 37
-rw-r--r-- drivers/pinctrl/Makefile | 3
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 11
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm6318.c | 121
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm63268.c | 139
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm6328.c | 91
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm6358.c | 20
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm6362.c | 121
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm6368.c | 91
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm63xx.h | 2
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-ns.c | 4
-rw-r--r-- drivers/pinctrl/berlin/berlin.c | 2
-rw-r--r-- drivers/pinctrl/freescale/Kconfig | 12
-rw-r--r-- drivers/pinctrl/mediatek/Kconfig | 12
-rw-r--r-- drivers/pinctrl/mediatek/Makefile | 1
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt8188.c | 1673
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h | 2259
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson.c | 7
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson.h | 4
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 26
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c | 295
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c | 29
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik.c | 117
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik.h | 30
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c | 8
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-wpcm450.c | 5
-rw-r--r-- drivers/pinctrl/pinctrl-amd.c | 34
-rw-r--r-- drivers/pinctrl/pinctrl-at91.c | 105
-rw-r--r-- drivers/pinctrl/pinctrl-cy8c95x0.c | 1419
-rw-r--r-- drivers/pinctrl/pinctrl-ingenic.c | 31
-rw-r--r-- drivers/pinctrl/pinctrl-mcp23s08.c | 3
-rw-r--r-- drivers/pinctrl/pinctrl-microchip-sgpio.c | 5
-rw-r--r-- drivers/pinctrl/pinctrl-ocelot.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-pistachio.c | 27
-rw-r--r-- drivers/pinctrl/pinctrl-rockchip.c | 346
-rw-r--r-- drivers/pinctrl/pinctrl-rockchip.h | 1
-rw-r--r-- drivers/pinctrl/pinctrl-st.c | 34
-rw-r--r-- drivers/pinctrl/qcom/Kconfig | 57
-rw-r--r-- drivers/pinctrl/qcom/Makefile | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c | 207
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c | 240
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 79
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-samsung.c | 8
-rw-r--r-- drivers/pinctrl/starfive/Kconfig | 18
-rw-r--r-- drivers/pinctrl/starfive/Makefile | 3
-rw-r--r-- drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c (renamed from drivers/pinctrl/pinctrl-starfive.c) | 10
-rw-r--r-- drivers/pinctrl/stm32/pinctrl-stm32.c | 5
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c | 9
-rw-r--r-- drivers/platform/Kconfig | 2
-rw-r--r-- drivers/platform/Makefile | 1
-rw-r--r-- drivers/platform/loongarch/Kconfig | 31
-rw-r--r-- drivers/platform/loongarch/Makefile | 1
-rw-r--r-- drivers/platform/loongarch/loongson-laptop.c | 624
-rw-r--r-- drivers/platform/x86/intel/int0002_vgpio.c | 3
-rw-r--r-- drivers/platform/x86/intel/int3472/tps68470.c | 6
-rw-r--r-- drivers/pnp/pnpacpi/rsparser.c | 7
-rw-r--r-- drivers/powercap/idle_inject.c | 2
-rw-r--r-- drivers/ptp/ptp_ocp.c | 1
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 2
-rw-r--r-- drivers/s390/block/dasd_eer.c | 4
-rw-r--r-- drivers/s390/block/dcssblk.c | 2
-rw-r--r-- drivers/s390/char/hmcdrv_cache.c | 2
-rw-r--r-- drivers/s390/char/tape_class.c | 4
-rw-r--r-- drivers/s390/char/zcore.c | 1
-rw-r--r-- drivers/s390/cio/qdio_debug.c | 2
-rw-r--r-- drivers/s390/cio/vfio_ccw_drv.c | 12
-rw-r--r-- drivers/s390/cio/vfio_ccw_ops.c | 92
-rw-r--r-- drivers/s390/cio/vfio_ccw_private.h | 11
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 109
-rw-r--r-- drivers/s390/crypto/vfio_ap_private.h | 6
-rw-r--r-- drivers/s390/net/ctcm_main.c | 2
-rw-r--r-- drivers/s390/net/fsm.c | 2
-rw-r--r-- drivers/s390/net/qeth_ethtool.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 2
-rw-r--r-- drivers/scsi/scsi_transport_spi.c | 7
-rw-r--r-- drivers/scsi/storvsc_drv.c | 11
-rw-r--r-- drivers/ssb/driver_gpio.c | 6
-rw-r--r-- drivers/tee/optee/call.c | 18
-rw-r--r-- drivers/thermal/Makefile | 2
-rw-r--r-- drivers/thermal/imx_sc_thermal.c | 68
-rw-r--r-- drivers/thermal/qcom/tsens-v0_1.c | 2
-rw-r--r-- drivers/thermal/rcar_thermal.c | 2
-rw-r--r-- drivers/thermal/thermal_core.c | 2
-rw-r--r-- drivers/thermal/thermal_of.c | 44
-rw-r--r-- drivers/thermal/thermal_sysfs.c | 8
-rw-r--r-- drivers/usb/core/urb.c | 2
-rw-r--r-- drivers/vdpa/vdpa.c | 73
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim.c | 12
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim.h | 3
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 2
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 5
-rw-r--r-- drivers/vdpa/virtio_pci/vp_vdpa.c | 22
-rw-r--r-- drivers/vfio/Kconfig | 1
-rw-r--r-- drivers/vfio/Makefile | 7
-rw-r--r-- drivers/vfio/container.c | 680
-rw-r--r-- drivers/vfio/fsl-mc/vfio_fsl_mc.c | 89
-rw-r--r-- drivers/vfio/iova_bitmap.c | 422
-rw-r--r-- drivers/vfio/mdev/mdev_core.c | 195
-rw-r--r-- drivers/vfio/mdev/mdev_driver.c | 7
-rw-r--r-- drivers/vfio/mdev/mdev_private.h | 32
-rw-r--r-- drivers/vfio/mdev/mdev_sysfs.c | 190
-rw-r--r-- drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 136
-rw-r--r-- drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 7
-rw-r--r-- drivers/vfio/pci/mlx5/cmd.c | 995
-rw-r--r-- drivers/vfio/pci/mlx5/cmd.h | 63
-rw-r--r-- drivers/vfio/pci/mlx5/main.c | 55
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 22
-rw-r--r-- drivers/vfio/pci/vfio_pci_config.c | 4
-rw-r--r-- drivers/vfio/pci/vfio_pci_core.c | 1105
-rw-r--r-- drivers/vfio/pci/vfio_pci_igd.c | 8
-rw-r--r-- drivers/vfio/pci/vfio_pci_intrs.c | 34
-rw-r--r-- drivers/vfio/pci/vfio_pci_priv.h | 104
-rw-r--r-- drivers/vfio/pci/vfio_pci_rdwr.c | 6
-rw-r--r-- drivers/vfio/pci/vfio_pci_zdev.c | 2
-rw-r--r-- drivers/vfio/platform/vfio_amba.c | 72
-rw-r--r-- drivers/vfio/platform/vfio_platform.c | 66
-rw-r--r-- drivers/vfio/platform/vfio_platform_common.c | 71
-rw-r--r-- drivers/vfio/platform/vfio_platform_private.h | 18
-rw-r--r-- drivers/vfio/vfio.h | 62
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 1
-rw-r--r-- drivers/vfio/vfio_main.c | 1392
-rw-r--r-- drivers/vhost/net.c | 4
-rw-r--r-- drivers/video/fbdev/arkfb.c | 8
-rw-r--r-- drivers/video/fbdev/controlfb.c | 7
-rw-r--r-- drivers/video/fbdev/gbefb.c | 20
-rw-r--r-- drivers/video/fbdev/imxfb.c | 1
-rw-r--r-- drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dispc.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dss.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/venc.c | 6
-rw-r--r-- drivers/video/fbdev/smscufx.c | 14
-rw-r--r-- drivers/video/fbdev/tridentfb.c | 11
-rw-r--r-- drivers/video/fbdev/udlfb.c | 2
-rw-r--r-- drivers/video/fbdev/uvesafb.c | 10
-rw-r--r-- drivers/video/fbdev/vga16fb.c | 1
-rw-r--r-- drivers/virtio/virtio_pci_common.c | 3
-rw-r--r-- drivers/virtio/virtio_ring.c | 18
-rw-r--r-- drivers/watchdog/Kconfig | 23
-rw-r--r-- drivers/watchdog/Makefile | 1
-rw-r--r-- drivers/watchdog/armada_37xx_wdt.c | 2
-rw-r--r-- drivers/watchdog/aspeed_wdt.c | 12
-rw-r--r-- drivers/watchdog/bd9576_wdt.c | 51
-rw-r--r-- drivers/watchdog/booke_wdt.c | 8
-rw-r--r-- drivers/watchdog/eurotechwdt.c | 2
-rw-r--r-- drivers/watchdog/exar_wdt.c | 427
-rw-r--r-- drivers/watchdog/ftwdt010_wdt.c | 25
-rw-r--r-- drivers/watchdog/hpwdt.c | 2
-rw-r--r-- drivers/watchdog/imx7ulp_wdt.c | 212
-rw-r--r-- drivers/watchdog/meson_gxbb_wdt.c | 24
-rw-r--r-- drivers/watchdog/npcm_wdt.c | 16
-rw-r--r-- drivers/watchdog/rti_wdt.c | 3
-rw-r--r-- drivers/watchdog/rzg2l_wdt.c | 39
-rw-r--r-- drivers/watchdog/s3c2410_wdt.c | 41
-rw-r--r-- drivers/watchdog/sa1100_wdt.c | 2
-rw-r--r-- drivers/watchdog/sp5100_tco.c | 13
-rw-r--r-- drivers/watchdog/twl4030_wdt.c | 1
-rw-r--r-- drivers/watchdog/w83627hf_wdt.c | 12
-rw-r--r-- drivers/watchdog/w83977f_wdt.c | 2
-rw-r--r-- drivers/watchdog/watchdog_dev.c | 6
-rw-r--r-- drivers/watchdog/wdat_wdt.c | 5
-rw-r--r-- drivers/xen/Kconfig | 2
-rw-r--r-- drivers/xen/gntdev-common.h | 3
-rw-r--r-- drivers/xen/gntdev.c | 80
-rw-r--r-- drivers/xen/grant-dma-ops.c | 112
-rw-r--r-- drivers/xen/privcmd.c | 2
-rw-r--r-- drivers/xen/xen-pciback/xenbus.c | 2
668 files changed, 26754 insertions, 9666 deletions
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d91ad378c00d..80ad530583c9 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
ghes_estatus_cache_add(generic, estatus);
}
- if (task_work_pending && current->mm != &init_mm) {
+ if (task_work_pending && current->mm) {
estatus_node->task_work.func = ghes_kick_task_work;
estatus_node->task_work_cpu = smp_processor_id();
ret = task_work_add(current, &estatus_node->task_work,
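The rewritten check works because kernel threads run with current->mm == NULL rather than &init_mm, so the old comparison never actually excluded them; testing current->mm directly does. Queuing task work on a kthread would leak the estatus node, since task work only runs on the return to user space.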
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d594effe905f..97450f4003cc 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -687,7 +687,22 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state;
+ } else if (device_may_wakeup(dev) && dev->power.wakeirq) {
+ /*
+ * The ACPI subsystem doesn't manage the wake bit for IRQs
+ * defined with ExclusiveAndWake and SharedAndWake. Instead we
+ * expect them to be managed via the PM subsystem. Drivers
+ * should call dev_pm_set_wake_irq to register an IRQ as a wake
+ * source.
+ *
+ * If a device has a wake IRQ attached we need to check the
+ * _S0W method to get the correct wake D-state. Otherwise we
+ * end up putting the device into D3Cold which will more than
+ * likely disable wake functionality.
+ */
+ wakeup = true;
} else {
+ /* ACPI GPE is specified in _PRW. */
wakeup = adev->wakeup.flags.valid;
}
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 4db5bb587599..1cc4647f78b8 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -147,6 +147,7 @@ struct acpi_irq_parse_one_ctx {
* @triggering: triggering attributes of hwirq
* @polarity: polarity attributes of hwirq
* @shareable: shareable attributes of hwirq
+ * @wake_capable: wake capable attribute of hwirq
* @ctx: acpi_irq_parse_one_ctx updated by this function
*
* Description:
@@ -156,12 +157,13 @@ struct acpi_irq_parse_one_ctx {
static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode,
u32 hwirq, u8 triggering,
u8 polarity, u8 shareable,
+ u8 wake_capable,
struct acpi_irq_parse_one_ctx *ctx)
{
if (!fwnode)
return;
ctx->rc = 0;
- *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
ctx->fwspec->fwnode = fwnode;
ctx->fwspec->param[0] = hwirq;
ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity);
@@ -204,7 +206,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity,
- irq->shareable, ctx);
+ irq->shareable, irq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
eirq = &ares->data.extended_irq;
@@ -218,7 +220,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
eirq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity,
- eirq->shareable, ctx);
+ eirq->shareable, eirq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 514d89656dde..6f9489edfb4e 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -336,8 +336,9 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
* @triggering: Triggering type as provided by ACPI.
* @polarity: Interrupt polarity as provided by ACPI.
* @shareable: Whether or not the interrupt is shareable.
+ * @wake_capable: Wake capability as provided by ACPI.
*/
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable)
{
unsigned long flags;
@@ -351,6 +352,9 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
if (shareable == ACPI_SHARED)
flags |= IORESOURCE_IRQ_SHAREABLE;
+ if (wake_capable == ACPI_WAKE_CAPABLE)
+ flags |= IORESOURCE_IRQ_WAKECAPABLE;
+
return flags | IORESOURCE_IRQ;
}
EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
@@ -468,7 +472,7 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
- bool check_override)
+ u8 wake_capable, bool check_override)
{
int irq, p, t;
@@ -501,7 +505,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
- res->flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ res->flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
if (irq >= 0) {
res->start = irq;
@@ -549,7 +553,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->shareable, true);
+ irq->shareable, irq->wake_capable,
+ true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
@@ -560,7 +565,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
if (is_gsi(ext_irq))
acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity,
- ext_irq->shareable, false);
+ ext_irq->shareable, ext_irq->wake_capable,
+ false);
else
irqresource_disabled(res, 0);
break;
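A minimal consumer sketch (not part of the patch; the helper below is hypothetical, while dev_pm_set_wake_irq() and the IORESOURCE_IRQ_WAKECAPABLE flag added above are real): once acpi_dev_resource_interrupt() has filled in the resource, a driver can test the new flag and register the IRQ as a wake source, as the device_pm.c comment earlier suggests.

#include <linux/ioport.h>
#include <linux/pm_wakeirq.h>

/* Hypothetical helper; res was populated by acpi_dev_resource_interrupt(). */
static int example_setup_wake_irq(struct device *dev, struct resource *res)
{
	/* The wake_capable bit from _CRS now survives into res->flags. */
	if (res->flags & IORESOURCE_IRQ_WAKECAPABLE)
		return dev_pm_set_wake_irq(dev, res->start);

	return 0;
}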
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ad4b2987b3d6..0b557c0d405e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1088,6 +1088,14 @@ int __init acpi_sleep_init(void)
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_FIRMWARE,
acpi_power_off, NULL);
+
+ /*
+ * Windows uses S5 for reboot, so some BIOSes depend on it to
+ * perform proper reboot.
+ */
+ register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
+ SYS_OFF_PRIO_FIRMWARE,
+ acpi_power_off_prepare, NULL);
} else {
acpi_no_s5 = true;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 539660ef93c7..40b07057983e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -158,7 +158,7 @@ struct acpi_thermal_flags {
};
struct acpi_thermal {
- struct acpi_device * device;
+ struct acpi_device *device;
acpi_bus_id name;
unsigned long temperature;
unsigned long last_temperature;
@@ -262,7 +262,7 @@ do { \
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
{
- acpi_status status = AE_OK;
+ acpi_status status;
unsigned long long tmp;
struct acpi_handle_list devices;
int valid = 0;
@@ -270,8 +270,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Shutdown */
if (flag & ACPI_TRIPS_CRITICAL) {
- status = acpi_evaluate_integer(tz->device->handle,
- "_CRT", NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp);
tz->trips.critical.temperature = tmp;
/*
* Treat freezing temperatures as invalid as well; some
@@ -284,8 +283,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
acpi_handle_debug(tz->device->handle,
"No critical threshold\n");
} else if (tmp <= 2732) {
- pr_info(FW_BUG "Invalid critical threshold (%llu)\n",
- tmp);
+ pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
@@ -312,8 +310,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Sleep (optional) */
if (flag & ACPI_TRIPS_HOT) {
- status = acpi_evaluate_integer(tz->device->handle,
- "_HOT", NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.hot.flags.valid = 0;
acpi_handle_debug(tz->device->handle,
@@ -329,7 +326,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Passive (optional) */
if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) ||
- (flag == ACPI_TRIPS_INIT)) {
+ flag == ACPI_TRIPS_INIT) {
valid = tz->trips.passive.flags.valid;
if (psv == -1) {
status = AE_SUPPORT;
@@ -338,32 +335,31 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = AE_OK;
} else {
status = acpi_evaluate_integer(tz->device->handle,
- "_PSV", NULL, &tmp);
+ "_PSV", NULL, &tmp);
}
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
tz->trips.passive.flags.valid = 0;
- else {
+ } else {
tz->trips.passive.temperature = tmp;
tz->trips.passive.flags.valid = 1;
if (flag == ACPI_TRIPS_INIT) {
- status = acpi_evaluate_integer(
- tz->device->handle, "_TC1",
- NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TC1", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
tz->trips.passive.tc1 = tmp;
- status = acpi_evaluate_integer(
- tz->device->handle, "_TC2",
- NULL, &tmp);
+
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TC2", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
tz->trips.passive.tc2 = tmp;
- status = acpi_evaluate_integer(
- tz->device->handle, "_TSP",
- NULL, &tmp);
+
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
@@ -374,25 +370,25 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
- NULL, &devices);
+ NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
- }
- else
+ } else {
tz->trips.passive.flags.valid = 1;
+ }
if (memcmp(&tz->trips.passive.devices, &devices,
- sizeof(struct acpi_handle_list))) {
+ sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.passive.devices, &devices,
- sizeof(struct acpi_handle_list));
+ sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
if (valid != tz->trips.passive.flags.valid)
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
@@ -403,29 +399,31 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (act == -1)
break; /* disable all active trip points */
- if ((flag == ACPI_TRIPS_INIT) || ((flag & ACPI_TRIPS_ACTIVE) &&
- tz->trips.active[i].flags.valid)) {
+ if (flag == ACPI_TRIPS_INIT || ((flag & ACPI_TRIPS_ACTIVE) &&
+ tz->trips.active[i].flags.valid)) {
status = acpi_evaluate_integer(tz->device->handle,
- name, NULL, &tmp);
+ name, NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.active[i].flags.valid = 0;
if (i == 0)
break;
+
if (act <= 0)
break;
+
if (i == 1)
- tz->trips.active[0].temperature =
- celsius_to_deci_kelvin(act);
+ tz->trips.active[0].temperature = celsius_to_deci_kelvin(act);
else
/*
* Don't allow override higher than
* the next higher trip point
*/
- tz->trips.active[i - 1].temperature =
- (tz->trips.active[i - 2].temperature <
+ tz->trips.active[i-1].temperature =
+ (tz->trips.active[i-2].temperature <
celsius_to_deci_kelvin(act) ?
- tz->trips.active[i - 2].temperature :
+ tz->trips.active[i-2].temperature :
celsius_to_deci_kelvin(act));
+
break;
} else {
tz->trips.active[i].temperature = tmp;
@@ -434,22 +432,22 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
name[2] = 'L';
- if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid ) {
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle,
- name, NULL, &devices);
+ name, NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid active%d threshold\n", i);
tz->trips.active[i].flags.valid = 0;
- }
- else
+ } else {
tz->trips.active[i].flags.valid = 1;
+ }
if (memcmp(&tz->trips.active[i].devices, &devices,
- sizeof(struct acpi_handle_list))) {
+ sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.active[i].devices, &devices,
- sizeof(struct acpi_handle_list));
+ sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
@@ -464,9 +462,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (flag & ACPI_TRIPS_DEVICES) {
memset(&devices, 0, sizeof(devices));
status = acpi_evaluate_reference(tz->device->handle, "_TZD",
- NULL, &devices);
- if (ACPI_SUCCESS(status)
- && memcmp(&tz->devices, &devices, sizeof(devices))) {
+ NULL, &devices);
+ if (ACPI_SUCCESS(status) &&
+ memcmp(&tz->devices, &devices, sizeof(devices))) {
tz->devices = devices;
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
@@ -548,8 +546,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
trip--;
}
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].flags.valid; i++) {
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) {
if (!trip) {
*type = THERMAL_TRIP_ACTIVE;
return 0;
@@ -572,8 +569,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
+ tz->trips.critical.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -582,8 +579,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.hot.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.hot.temperature,
- tz->kelvin_offset);
+ tz->trips.hot.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -592,8 +589,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.passive.temperature,
- tz->kelvin_offset);
+ tz->trips.passive.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -603,8 +600,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
tz->trips.active[i].flags.valid; i++) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.active[i].temperature,
- tz->kelvin_offset);
+ tz->trips.active[i].temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -620,15 +617,16 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
*temperature = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
+ tz->trips.critical.temperature,
+ tz->kelvin_offset);
return 0;
- } else
- return -EINVAL;
+ }
+
+ return -EINVAL;
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
+ int trip, enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal->devdata;
enum thermal_trip_type type;
@@ -657,9 +655,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
* tz->temperature has already been updated by generic thermal layer,
* before this callback being invoked
*/
- i = (tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature))
- + (tz->trips.passive.tc2
- * (tz->temperature - tz->trips.passive.temperature));
+ i = tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature) +
+ tz->trips.passive.tc2 * (tz->temperature - tz->trips.passive.temperature);
if (i > 0)
*trend = THERMAL_TREND_RAISING;
@@ -667,6 +664,7 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
*trend = THERMAL_TREND_DROPPING;
else
*trend = THERMAL_TREND_STABLE;
+
return 0;
}
@@ -691,8 +689,8 @@ static void acpi_thermal_zone_device_critical(struct thermal_zone_device *therma
}
static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev,
- bool bind)
+ struct thermal_cooling_device *cdev,
+ bool bind)
{
struct acpi_device *device = cdev->devdata;
struct acpi_thermal *tz = thermal->devdata;
@@ -711,22 +709,23 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
trip++;
- for (i = 0; i < tz->trips.passive.devices.count;
- i++) {
+ for (i = 0; i < tz->trips.passive.devices.count; i++) {
handle = tz->trips.passive.devices.handles[i];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
+
if (bind)
- result =
- thermal_zone_bind_cooling_device
- (thermal, trip, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
+ result = thermal_zone_bind_cooling_device(
+ thermal, trip, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
else
result =
- thermal_zone_unbind_cooling_device
- (thermal, trip, cdev);
+ thermal_zone_unbind_cooling_device(
+ thermal, trip, cdev);
+
if (result)
goto failed;
}
@@ -735,22 +734,24 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].flags.valid)
break;
+
trip++;
- for (j = 0;
- j < tz->trips.active[i].devices.count;
- j++) {
+ for (j = 0; j < tz->trips.active[i].devices.count; j++) {
handle = tz->trips.active[i].devices.handles[j];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
+
if (bind)
- result = thermal_zone_bind_cooling_device
- (thermal, trip, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
+ result = thermal_zone_bind_cooling_device(
+ thermal, trip, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
else
- result = thermal_zone_unbind_cooling_device
- (thermal, trip, cdev);
+ result = thermal_zone_unbind_cooling_device(
+ thermal, trip, cdev);
+
if (result)
goto failed;
}
@@ -762,14 +763,14 @@ failed:
static int
acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, true);
}
static int
acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, false);
}
@@ -802,20 +803,20 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
if (tz->trips.passive.flags.valid)
trips++;
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].flags.valid; i++, trips++);
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid;
+ i++, trips++);
if (tz->trips.passive.flags.valid)
- tz->thermal_zone =
- thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- tz->trips.passive.tsp*100,
- tz->polling_frequency*100);
+ tz->thermal_zone = thermal_zone_device_register("acpitz", trips, 0, tz,
+ &acpi_thermal_zone_ops, NULL,
+ tz->trips.passive.tsp * 100,
+ tz->polling_frequency * 100);
else
tz->thermal_zone =
thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- 0, tz->polling_frequency*100);
+ &acpi_thermal_zone_ops, NULL,
+ 0, tz->polling_frequency * 100);
+
if (IS_ERR(tz->thermal_zone))
return -ENODEV;
@@ -881,7 +882,6 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
-
if (!tz)
return;
@@ -893,13 +893,13 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
+ dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
+ dev_name(&device->dev), event, 0);
break;
default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
@@ -942,8 +942,7 @@ static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz)
static int acpi_thermal_get_info(struct acpi_thermal *tz)
{
- int result = 0;
-
+ int result;
if (!tz)
return -EINVAL;
@@ -1020,9 +1019,8 @@ static void acpi_thermal_check_fn(struct work_struct *work)
static int acpi_thermal_add(struct acpi_device *device)
{
- int result = 0;
- struct acpi_thermal *tz = NULL;
-
+ struct acpi_thermal *tz;
+ int result;
if (!device)
return -EINVAL;
@@ -1063,7 +1061,7 @@ end:
static int acpi_thermal_remove(struct acpi_device *device)
{
- struct acpi_thermal *tz = NULL;
+ struct acpi_thermal *tz;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1099,6 +1097,7 @@ static int acpi_thermal_resume(struct device *dev)
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].flags.valid)
break;
+
tz->trips.active[i].flags.enabled = 1;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
result = acpi_bus_update_power(
@@ -1119,7 +1118,6 @@ static int acpi_thermal_resume(struct device *dev)
#endif
static int thermal_act(const struct dmi_system_id *d) {
-
if (act == 0) {
pr_notice("%s detected: disabling all active thermal trip points\n",
d->ident);
@@ -1128,14 +1126,12 @@ static int thermal_act(const struct dmi_system_id *d) {
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
-
pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
-
if (tzp == 0) {
pr_notice("%s detected: enabling thermal zone polling\n",
d->ident);
@@ -1144,7 +1140,6 @@ static int thermal_tzp(const struct dmi_system_id *d) {
return 0;
}
static int thermal_psv(const struct dmi_system_id *d) {
-
if (psv == 0) {
pr_notice("%s detected: disabling all passive thermal trip points\n",
d->ident);
@@ -1195,7 +1190,7 @@ static const struct dmi_system_id thermal_dmi_table[] __initconst = {
static int __init acpi_thermal_init(void)
{
- int result = 0;
+ int result;
dmi_check_system(thermal_dmi_table);
@@ -1222,8 +1217,6 @@ static void __exit acpi_thermal_exit(void)
{
acpi_bus_unregister_driver(&acpi_thermal_driver);
destroy_workqueue(acpi_thermal_pm_queue);
-
- return;
}
module_init(acpi_thermal_init);
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 6132092dab2a..ed752cbbe636 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/dma-iommu.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 42f249070c09..5350c73564b6 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -654,12 +654,14 @@ void __init acpi_s2idle_setup(void)
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
+ unsigned int sleep_flags;
+
if (!lps0_device_handle || sleep_no_lps0)
return -ENODEV;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
list_add(&arg->list_node, &lps0_s2idle_devops_head);
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return 0;
}
@@ -667,12 +669,14 @@ EXPORT_SYMBOL_GPL(acpi_register_lps0_dev);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
+ unsigned int sleep_flags;
+
if (!lps0_device_handle || sleep_no_lps0)
return;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
list_del(&arg->list_node);
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev);
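The s2idle hunks above follow the 6.1 lock_system_sleep() API change: the lock call now saves the caller's task flags (setting PF_NOFREEZE while the lock is held) and returns them, and unlock_system_sleep() needs them back to restore that state. A sketch of the general pattern, with the critical section elided:

	unsigned int sleep_flags;

	sleep_flags = lock_system_sleep();	/* take the lock, mark current unfreezable */
	/* ... touch suspend/resume state, e.g. the s2idle device-ops list ... */
	unlock_system_sleep(sleep_flags);	/* restore the saved flags, drop the lock */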
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6428f6be69e3..880224ec6abb 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4259,10 +4259,9 @@ static int binder_wait_for_work(struct binder_thread *thread,
struct binder_proc *proc = thread->proc;
int ret = 0;
- freezer_do_not_count();
binder_inner_proc_lock(proc);
for (;;) {
- prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
if (binder_has_work_ilocked(thread, do_proc_work))
break;
if (do_proc_work)
@@ -4279,7 +4278,6 @@ static int binder_wait_for_work(struct binder_thread *thread,
}
finish_wait(&thread->wait, &wait);
binder_inner_proc_unlock(proc);
- freezer_count();
return ret;
}
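The binder hunks above come from the freezer rework: with TASK_FREEZABLE in the sleep state, the freezer may freeze the task while it waits, so the freezer_do_not_count()/freezer_count() bracket is no longer needed. In general form (a sketch; the wait queue and condition below are placeholders, not binder symbols):

	DEFINE_WAIT(wait);

	for (;;) {
		/* TASK_FREEZABLE lets the freezer act on us while we sleep. */
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		if (wake_condition())		/* placeholder condition */
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&wq, &wait);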
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index bc60c9cd3230..9aa0da991cfb 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -869,12 +869,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
}
}
-/* return true if the memory block is offlined, otherwise, return false */
-bool is_memblock_offlined(struct memory_block *mem)
-{
- return mem->state == MEM_OFFLINE;
-}
-
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
&dev_attr_probe.attr,
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 432d40a5f910..faf3597a96da 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/hugetlb.h>
static struct bus_type node_subsys = {
.name = "node",
@@ -589,64 +590,9 @@ static const struct attribute_group *node_dev_groups[] = {
NULL
};
-#ifdef CONFIG_HUGETLBFS
-/*
- * hugetlbfs per node attributes registration interface:
- * When/if hugetlb[fs] subsystem initializes [sometime after this module],
- * it will register its per node attributes for all online nodes with
- * memory. It will also call register_hugetlbfs_with_node(), below, to
- * register its attribute registration functions with this node driver.
- * Once these hooks have been initialized, the node driver will call into
- * the hugetlb module to [un]register attributes for hot-plugged nodes.
- */
-static node_registration_func_t __hugetlb_register_node;
-static node_registration_func_t __hugetlb_unregister_node;
-
-static inline bool hugetlb_register_node(struct node *node)
-{
- if (__hugetlb_register_node &&
- node_state(node->dev.id, N_MEMORY)) {
- __hugetlb_register_node(node);
- return true;
- }
- return false;
-}
-
-static inline void hugetlb_unregister_node(struct node *node)
-{
- if (__hugetlb_unregister_node)
- __hugetlb_unregister_node(node);
-}
-
-void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister)
-{
- __hugetlb_register_node = doregister;
- __hugetlb_unregister_node = unregister;
-}
-#else
-static inline void hugetlb_register_node(struct node *node) {}
-
-static inline void hugetlb_unregister_node(struct node *node) {}
-#endif
-
static void node_device_release(struct device *dev)
{
- struct node *node = to_node(dev);
-
-#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
- /*
- * We schedule the work only when a memory section is
- * onlined/offlined on this node. When we come here,
- * all the memory on this node has been offlined,
- * so we won't enqueue new work to this work.
- *
- * The work is using node->node_work, so we should
- * flush work before freeing the memory.
- */
- flush_work(&node->node_work);
-#endif
- kfree(node);
+ kfree(to_node(dev));
}
/*
@@ -665,13 +611,13 @@ static int register_node(struct node *node, int num)
node->dev.groups = node_dev_groups;
error = device_register(&node->dev);
- if (error)
+ if (error) {
put_device(&node->dev);
- else {
+ } else {
hugetlb_register_node(node);
-
compaction_register_node(node);
}
+
return error;
}
@@ -684,8 +630,8 @@ static int register_node(struct node *node, int num)
*/
void unregister_node(struct node *node)
{
+ hugetlb_unregister_node(node);
compaction_unregister_node(node);
- hugetlb_unregister_node(node); /* no-op, if memoryless node */
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
@@ -907,74 +853,8 @@ void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
(void *)&nid, func);
return;
}
-
-#ifdef CONFIG_HUGETLBFS
-/*
- * Handle per node hstate attribute [un]registration on transistions
- * to/from memoryless state.
- */
-static void node_hugetlb_work(struct work_struct *work)
-{
- struct node *node = container_of(work, struct node, node_work);
-
- /*
- * We only get here when a node transitions to/from memoryless state.
- * We can detect which transition occurred by examining whether the
- * node has memory now. hugetlb_register_node() already check this
- * so we try to register the attributes. If that fails, then the
- * node has transitioned to memoryless, try to unregister the
- * attributes.
- */
- if (!hugetlb_register_node(node))
- hugetlb_unregister_node(node);
-}
-
-static void init_node_hugetlb_work(int nid)
-{
- INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
-}
-
-static int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
-
- switch (action) {
- case MEM_ONLINE:
- case MEM_OFFLINE:
- /*
- * offload per node hstate [un]registration to a work thread
- * when transitioning to/from memoryless state.
- */
- if (nid != NUMA_NO_NODE)
- schedule_work(&node_devices[nid]->node_work);
- break;
-
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_CANCEL_OFFLINE:
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG */
-#if !defined(CONFIG_MEMORY_HOTPLUG) || !defined(CONFIG_HUGETLBFS)
-static inline int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- return NOTIFY_OK;
-}
-
-static void init_node_hugetlb_work(int nid) { }
-
-#endif
-
int __register_one_node(int nid)
{
int error;
@@ -993,8 +873,6 @@ int __register_one_node(int nid)
}
INIT_LIST_HEAD(&node_devices[nid]->access_list);
- /* initialize work queue for memory hot plug */
- init_node_hugetlb_work(nid);
node_init_caches(nid);
return error;
@@ -1065,13 +943,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
NULL,
};
-#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
void __init node_dev_init(void)
{
- static struct notifier_block node_memory_callback_nb = {
- .notifier_call = node_memory_callback,
- .priority = NODE_CALLBACK_PRI,
- };
int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
@@ -1081,8 +954,6 @@ void __init node_dev_init(void)
if (ret)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
- register_hotmemory_notifier(&node_memory_callback_nb);
-
/*
* Create all node devices, which will properly link the node
* to applicable memory block devices and already created cpu devices.
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 296ea673d661..12b044151298 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -138,6 +138,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
+EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 55a10e6d4e2a..ead135c7044c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2085,8 +2085,10 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
/* Always-on domains must be powered on at initialization. */
if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
- !genpd_status_on(genpd))
+ !genpd_status_on(genpd)) {
+ pr_err("always-on PM domain %s is not on\n", genpd->name);
return -EINVAL;
+ }
/* Multiple states but no governor doesn't make sense. */
if (!gov && genpd->state_count > 1)
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index fac8ff983aec..65fb9bad1577 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -115,7 +115,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, gc->ngpio)
- generic_handle_irq(irq_find_mapping(gc->irq.domain, gpio));
+ generic_handle_domain_irq_safe(gc->irq.domain, gpio);
bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
return IRQ_HANDLED;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3f4739d52268..19da5defd734 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -130,7 +130,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
+static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
unsigned short segments = blk_rq_nr_discard_segments(req);
unsigned short n = 0;
@@ -240,6 +240,9 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
+ case REQ_OP_SECURE_ERASE:
+ type = VIRTIO_BLK_T_SECURE_ERASE;
+ break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
@@ -251,8 +254,9 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
- if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
- if (virtblk_setup_discard_write_zeroes(req, unmap))
+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
+ type == VIRTIO_BLK_T_SECURE_ERASE) {
+ if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
return BLK_STS_RESOURCE;
}
@@ -886,6 +890,8 @@ static int virtblk_probe(struct virtio_device *vdev)
int err, index;
u32 v, blk_size, max_size, sg_elems, opt_io_size;
+ u32 max_discard_segs = 0;
+ u32 discard_granularity = 0;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
@@ -1043,27 +1049,14 @@ static int virtblk_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
- discard_sector_alignment, &v);
- if (v)
- q->limits.discard_granularity = v << SECTOR_SHIFT;
- else
- q->limits.discard_granularity = blk_size;
+ discard_sector_alignment, &discard_granularity);
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
- &v);
-
- /*
- * max_discard_seg == 0 is out of spec but we always
- * handled it.
- */
- if (!v)
- v = sg_elems;
- blk_queue_max_discard_segments(q,
- min(v, MAX_DISCARD_SEGMENTS));
+ &max_discard_segs);
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
@@ -1072,6 +1065,85 @@ static int virtblk_probe(struct virtio_device *vdev)
blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
}
+ /* The discard and secure erase limits are combined since the Linux
+ * block layer uses the same limit for both commands.
+ *
+ * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
+ * are negotiated, we will use the minimum between the limits.
+ *
+ * discard sector alignment is set to the minimum between discard_sector_alignment
+ * and secure_erase_sector_alignment.
+ *
+ * max discard segments is set to the minimum between max_discard_seg and
+ * max_secure_erase_seg.
+ */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ secure_erase_sector_alignment, &v);
+
+ /* secure_erase_sector_alignment must not be zero; the device must
+ * report a valid number of sectors.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: secure_erase_sector_alignment can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ discard_granularity = min_not_zero(discard_granularity, v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_secure_erase_sectors, &v);
+
+ /* max_secure_erase_sectors must not be zero; the device must report
+ * a valid number of sectors.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: max_secure_erase_sectors can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ blk_queue_max_secure_erase_sectors(q, v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_secure_erase_seg, &v);
+
+ /* max_secure_erase_seg must not be zero; the device must report
+ * a valid number of segments.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: max_secure_erase_seg can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ max_discard_segs = min_not_zero(max_discard_segs, v);
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
+ virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
+ /* max_discard_seg and discard_granularity will be 0 only if the
+ * max_discard_seg and discard_sector_alignment fields in the virtio
+ * config are 0 and the VIRTIO_BLK_F_SECURE_ERASE feature is not
+ * negotiated. In this case, we use the default values.
+ */
+ if (!max_discard_segs)
+ max_discard_segs = sg_elems;
+
+ blk_queue_max_discard_segments(q,
+ min(max_discard_segs, MAX_DISCARD_SEGMENTS));
+
+ if (discard_granularity)
+ q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
+ else
+ q->limits.discard_granularity = blk_size;
+ }
+
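/*
 * Editor's sketch (not part of the patch): the limit combination above
 * relies on min_not_zero() from <linux/minmax.h>, where a zero value
 * means "not reported" and therefore never wins the minimum:
 *
 *	min_not_zero(0, 32)  == 32
 *	min_not_zero(16, 32) == 16
 *	min_not_zero(16, 0)  == 16
 *
 * So a device offering only SECURE_ERASE still produces sane discard
 * limits, and the !max_discard_segs fallback to sg_elems only triggers
 * when neither feature reported a segment count.
 */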
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
@@ -1167,6 +1239,7 @@ static unsigned int features_legacy[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
+ VIRTIO_BLK_F_SECURE_ERASE,
}
;
static unsigned int features[] = {
@@ -1174,6 +1247,7 @@ static unsigned int features[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
+ VIRTIO_BLK_F_SECURE_ERASE,
};
static struct virtio_driver virtio_blk = {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e551433cd107..7c74d8cba44f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -329,8 +329,8 @@ static ssize_t idle_store(struct device *dev,
if (!sysfs_streq(buf, "all")) {
/*
- * If it did not parse as 'all' try to treat it as an integer when
- * we have memory tracking enabled.
+ * If it did not parse as 'all' try to treat it as an integer
+ * when we have memory tracking enabled.
*/
u64 age_sec;
@@ -345,7 +345,10 @@ static ssize_t idle_store(struct device *dev,
if (!init_done(zram))
goto out_unlock;
- /* A cutoff_time of 0 marks everything as idle, this is the "all" behavior */
+ /*
+ * A cutoff_time of 0 marks everything as idle; this is the
+ * "all" behavior.
+ */
mark_idle(zram, cutoff_time);
rv = len;
@@ -1410,9 +1413,19 @@ compress_again:
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
- if (!IS_ERR((void *)handle))
+ if (IS_ERR((void *)handle))
+ return PTR_ERR((void *)handle);
+
+ if (comp_len != PAGE_SIZE)
goto compress_again;
- return PTR_ERR((void *)handle);
+ /*
+ * If the page is not compressible, the code below must run with
+ * the lock held. The zcomp_stream_get() call is needed to disable
+ * cpu hotplug and to grab the zstrm buffer back, so that the
+ * zstrm dereference below remains valid.
+ */
+ zstrm = zcomp_stream_get(zram->comp);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
@@ -1710,9 +1723,6 @@ out:
static void zram_reset_device(struct zram *zram)
{
- struct zcomp *comp;
- u64 disksize;
-
down_write(&zram->init_lock);
zram->limit_pages = 0;
@@ -1722,17 +1732,15 @@ static void zram_reset_device(struct zram *zram)
return;
}
- comp = zram->comp;
- disksize = zram->disksize;
- zram->disksize = 0;
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
/* I/O operation under all of CPU are done so let's free */
- zram_meta_free(zram, disksize);
+ zram_meta_free(zram, zram->disksize);
+ zram->disksize = 0;
memset(&zram->stats, 0, sizeof(zram->stats));
- zcomp_destroy(comp);
+ zcomp_destroy(zram->comp);
+ zram->comp = NULL;
reset_bdev(zram);
up_write(&zram->init_lock);
@@ -2126,6 +2134,8 @@ static int __init zram_init(void)
{
int ret;
+ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
+
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);
if (ret < 0)
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 80c3b43b4828..a2bda53020fd 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -30,16 +30,15 @@
/*
- * The lower ZRAM_FLAG_SHIFT bits of table.flags is for
- * object size (excluding header), the higher bits is for
- * zram_pageflags.
+ * ZRAM is mainly used for memory efficiency so we want to keep memory
+ * footprint small and thus squeeze size and zram pageflags into a flags
+ * member. The lower ZRAM_FLAG_SHIFT bits is for object size (excluding
+ * header), which cannot be larger than PAGE_SIZE (requiring PAGE_SHIFT
+ * bits), the higher bits are for zram_pageflags.
*
- * zram is mainly used for memory efficiency so we want to keep memory
- * footprint small so we can squeeze size and flags into a field.
- * The lower ZRAM_FLAG_SHIFT bits is for object size (excluding header),
- * the higher bits is for zram_pageflags.
+ * We use BUILD_BUG_ON() to make sure that zram pageflags don't overflow.
*/
-#define ZRAM_FLAG_SHIFT 24
+#define ZRAM_FLAG_SHIFT (PAGE_SHIFT + 1)
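/*
 * Editor's sketch (not in the patch): with the layout above, an entry's
 * object size and pageflags share one unsigned long. Illustrative
 * helpers only; zram's real accessors live in zram_drv.c.
 */
static inline unsigned long zram_pack(unsigned long obj_size, int flag_bit)
{
	/* obj_size fits in bits [0, ZRAM_FLAG_SHIFT); flag_bit >= ZRAM_FLAG_SHIFT */
	return obj_size | (1UL << flag_bit);
}

static inline unsigned long zram_obj_size(unsigned long flags)
{
	/* mask off the pageflag bits, keeping the size field */
	return flags & ((1UL << ZRAM_FLAG_SHIFT) - 1);
}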
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index b24ac39a903b..e34c3ea692b6 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
MAX_BITS_PER_CALL);
arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
- if ((int)res.a0 < 0)
- return (int)res.a0;
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return copied;
cond_resched();
break;
+ default:
+ return -EIO;
}
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 16f227b995e8..cc002b0c2f0c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(default_quality,
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void hwrng_manage_rngd(struct hwrng *rng);
+static int hwrng_fillfn(void *unused);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -96,6 +96,15 @@ static int set_current_rng(struct hwrng *rng)
drop_current_rng();
current_rng = rng;
+ /* if necessary, start hwrng thread */
+ if (!hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
+ }
+
return 0;
}
@@ -167,8 +176,6 @@ skip_init:
rng->quality = 1024;
current_quality = rng->quality; /* obsolete */
- hwrng_manage_rngd(rng);
-
return 0;
}
@@ -454,10 +461,6 @@ static ssize_t rng_quality_store(struct device *dev,
/* the best available RNG may have changed */
ret = enable_best_rng();
- /* start/stop rngd if necessary */
- if (current_rng)
- hwrng_manage_rngd(current_rng);
-
out:
mutex_unlock(&rng_mutex);
return ret ? ret : len;
@@ -507,16 +510,14 @@ static int hwrng_fillfn(void *unused)
rng->quality = current_quality; /* obsolete */
quality = rng->quality;
mutex_unlock(&reading_mutex);
- put_rng(rng);
- if (!quality)
- break;
+ if (rc <= 0)
+ hwrng_msleep(rng, 10000);
- if (rc <= 0) {
- pr_warn("hwrng: no data available\n");
- msleep_interruptible(10000);
+ put_rng(rng);
+
+ if (rc <= 0)
continue;
- }
/* If we cannot credit at least one bit of entropy,
* keep track of the remainder for the next iteration
@@ -533,22 +534,6 @@ static int hwrng_fillfn(void *unused)
return 0;
}
-static void hwrng_manage_rngd(struct hwrng *rng)
-{
- if (WARN_ON(!mutex_is_locked(&rng_mutex)))
- return;
-
- if (rng->quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (rng->quality > 0 && !hwrng_fill) {
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
- }
- }
-}
-
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
@@ -570,6 +555,7 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ init_completion(&rng->dying);
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
@@ -617,6 +603,7 @@ void hwrng_unregister(struct hwrng *rng)
old_rng = current_rng;
list_del(&rng->list);
+ complete_all(&rng->dying);
if (current_rng == rng) {
err = enable_best_rng();
if (err) {
@@ -685,6 +672,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
+long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+{
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+ return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
+}
+EXPORT_SYMBOL_GPL(hwrng_msleep);
+
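/*
 * Editor's sketch (not in the patch): a driver read callback can use
 * hwrng_msleep() instead of msleep() so that a long poll aborts as soon
 * as the hwrng core signals rng->dying on unregister. The helpers
 * example_data_ready() and example_read_words() are hypothetical.
 */
static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	while (!example_data_ready()) {
		if (!wait)
			return 0;
		/* non-zero means the rng is dying (or we were interrupted) */
		if (hwrng_msleep(rng, 1000))
			return -ENODEV;
	}
	return example_read_words(buf, max);
}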
static int __init hwrng_modinit(void)
{
int ret;
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index b05d676ca814..a1c24148ed31 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -245,7 +245,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(rngc->clk)) {
dev_err(&pdev->dev, "Can not get rng_clk\n");
return PTR_ERR(rngc->clk);
@@ -255,27 +255,14 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(rngc->clk);
- if (ret)
- return ret;
-
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = ver_id >> RNGC_TYPE_SHIFT;
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
- ret = -ENODEV;
- goto err;
- }
-
- ret = devm_request_irq(&pdev->dev,
- irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret) {
- dev_err(rngc->dev, "Can't get interrupt working.\n");
- goto err;
- }
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ return -ENODEV;
init_completion(&rngc->rng_op_done);
@@ -290,18 +277,25 @@ static int imx_rngc_probe(struct platform_device *pdev)
imx_rngc_irq_mask_clear(rngc);
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "self test failed\n");
- goto err;
+ return ret;
}
}
- ret = hwrng_register(&rngc->rng);
+ ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret) {
dev_err(&pdev->dev, "hwrng registration failed\n");
- goto err;
+ return ret;
}
dev_info(&pdev->dev,
@@ -309,22 +303,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
-
-err:
- clk_disable_unprepare(rngc->clk);
-
- return ret;
-}
-
-static int __exit imx_rngc_remove(struct platform_device *pdev)
-{
- struct imx_rngc *rngc = platform_get_drvdata(pdev);
-
- hwrng_unregister(&rngc->rng);
-
- clk_disable_unprepare(rngc->clk);
-
- return 0;
}
static int __maybe_unused imx_rngc_suspend(struct device *dev)
@@ -355,11 +333,10 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
- .name = "imx_rngc",
+ .name = KBUILD_MODNAME,
.pm = &imx_rngc_pm_ops,
.of_match_table = imx_rngc_dt_ids,
},
- .remove = __exit_p(imx_rngc_remove),
};
module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index b061e6b513ed..39565cf74b2c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -119,13 +119,13 @@ config ASPEED_KCS_IPMI_BMC
provides access to the KCS IO space for the BMC side.
config NPCM7XX_KCS_IPMI_BMC
- depends on ARCH_NPCM7XX || COMPILE_TEST
+ depends on ARCH_NPCM || COMPILE_TEST
select IPMI_KCS_BMC
select REGMAP_MMIO
- tristate "NPCM7xx KCS IPMI BMC driver"
+ tristate "NPCM KCS IPMI BMC driver"
help
Provides a driver for the KCS (Keyboard Controller Style) IPMI
- interface found on Nuvoton NPCM7xx SOCs.
+ interface found on Nuvoton NPCM SOCs.
The driver implements the BMC side of the KCS controller; it
provides access to the KCS IO space for the BMC side.
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 25c010c9ec25..7c1aee5e11b7 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -218,8 +218,8 @@ static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
{
if ((msg->data[0] >> 2) & 1) {
/*
- * It's a response being sent, we needto return a
- * response response. Fake a send msg command
+ * It's a response being sent, we need to return a
+ * response to the response. Fake a send msg command
* response with channel 0. This will always be ipmb
* direct.
*/
@@ -424,10 +424,8 @@ static void ipmi_ipmb_request_events(void *send_info)
/* We don't fetch events here. */
}
-static void ipmi_ipmb_remove(struct i2c_client *client)
+static void ipmi_ipmb_cleanup(struct ipmi_ipmb_dev *iidev)
{
- struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
-
if (iidev->slave) {
i2c_slave_unregister(iidev->slave);
if (iidev->slave != iidev->client)
@@ -436,7 +434,13 @@ static void ipmi_ipmb_remove(struct i2c_client *client)
iidev->slave = NULL;
iidev->client = NULL;
ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+ ipmi_ipmb_cleanup(iidev);
ipmi_unregister_smi(iidev->intf);
}
@@ -542,7 +546,7 @@ static int ipmi_ipmb_probe(struct i2c_client *client)
out_err:
if (slave && slave != client)
i2c_unregister_device(slave);
- ipmi_ipmb_remove(client);
+ ipmi_ipmb_cleanup(iidev);
return rv;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 703433493c85..49a1707693c9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -736,12 +736,6 @@ static void intf_free(struct kref *ref)
kfree(intf);
}
-struct watcher_entry {
- int intf_num;
- struct ipmi_smi *intf;
- struct list_head link;
-};
-
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
@@ -4357,7 +4351,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
/*
* The message starts at byte 4 which follows the
- * the Channel Byte in the "GET MESSAGE" command
+ * Channel Byte in the "GET MESSAGE" command
*/
recv_msg->msg.data_len = msg->rsp_size - 4;
memcpy(recv_msg->msg_data, &msg->rsp[4],
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 13da021e7c6b..e1072809fe31 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2098,7 +2098,7 @@ static struct platform_driver ipmi_driver = {
.id_table = ssif_plat_ids
};
-static int init_ipmi_ssif(void)
+static int __init init_ipmi_ssif(void)
{
int i;
int rv;
@@ -2140,7 +2140,7 @@ static int init_ipmi_ssif(void)
}
module_init(init_ipmi_ssif);
-static void cleanup_ipmi_ssif(void)
+static void __exit cleanup_ipmi_ssif(void)
{
if (!initialized)
return;
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index cdc88cde1e9a..19c32bf50e0e 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -207,17 +207,24 @@ static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask,
}
/*
- * AST_usrGuide_KCS.pdf
- * 2. Background:
- * we note D for Data, and C for Cmd/Status, default rules are
- * A. KCS1 / KCS2 ( D / C:X / X+4 )
- * D / C : CA0h / CA4h
- * D / C : CA8h / CACh
- * B. KCS3 ( D / C:XX2h / XX3h )
- * D / C : CA2h / CA3h
- * D / C : CB2h / CB3h
- * C. KCS4
- * D / C : CA4h / CA5h
+ * We note D for Data, and C for Cmd/Status, default rules are
+ *
+ * 1. Only the D address is given:
+ * A. KCS1/KCS2 (D/C: X/X+4)
+ * D/C: CA0h/CA4h
+ * D/C: CA8h/CACh
+ * B. KCS3 (D/C: XX2h/XX3h)
+ * D/C: CA2h/CA3h
+ * C. KCS4 (D/C: X/X+1)
+ * D/C: CA4h/CA5h
+ *
+ * 2. Both the D/C addresses are given:
+ * A. KCS1/KCS2/KCS4 (D/C: X/Y)
+ * D/C: CA0h/CA1h
+ * D/C: CA8h/CA9h
+ * D/C: CA4h/CA5h
+ * B. KCS3 (D/C: XX2h/XX3h)
+ * D/C: CA2h/CA3h
*/
static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
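/*
 * Editor's sketch (not in the patch): how the Cmd/Status address is
 * derived from the Data address when only one address cell is given
 * (rule 1 above). Channel numbering follows the comment; this helper
 * is illustrative, not the driver's actual lookup.
 */
static u32 example_kcs_status_addr(int channel, u32 data_addr)
{
	switch (channel) {
	case 1:
	case 2:
		return data_addr + 4;	/* rule 1.A: D/C is X/X+4 */
	case 3:
	case 4:
		return data_addr + 1;	/* rules 1.B and 1.C: D/C is X/X+1 */
	default:
		return 0;
	}
}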
diff --git a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
index 486834a962c3..cf670e891966 100644
--- a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
+++ b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
@@ -548,7 +548,7 @@ static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
.ops = &kcs_bmc_ipmi_driver_ops,
};
-static int kcs_bmc_ipmi_init(void)
+static int __init kcs_bmc_ipmi_init(void)
{
kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);
@@ -556,7 +556,7 @@ static int kcs_bmc_ipmi_init(void)
}
module_init(kcs_bmc_ipmi_init);
-static void kcs_bmc_ipmi_exit(void)
+static void __exit kcs_bmc_ipmi_exit(void)
{
kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
}
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
index 7e2067628a6c..1793358be782 100644
--- a/drivers/char/ipmi/kcs_bmc_serio.c
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -140,7 +140,7 @@ static struct kcs_bmc_driver kcs_bmc_serio_driver = {
.ops = &kcs_bmc_serio_driver_ops,
};
-static int kcs_bmc_serio_init(void)
+static int __init kcs_bmc_serio_init(void)
{
kcs_bmc_register_driver(&kcs_bmc_serio_driver);
@@ -148,7 +148,7 @@ static int kcs_bmc_serio_init(void)
}
module_init(kcs_bmc_serio_init);
-static void kcs_bmc_serio_exit(void)
+static void __exit kcs_bmc_serio_exit(void)
{
kcs_bmc_unregister_driver(&kcs_bmc_serio_driver);
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 32a932a065a6..5611d127363e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -712,8 +712,8 @@ static const struct memdev {
#endif
[5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
[7] = { "full", 0666, &full_fops, 0 },
- [8] = { "random", 0666, &random_fops, 0 },
- [9] = { "urandom", 0666, &urandom_fops, 0 },
+ [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
+ [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 79d7d4e4e582..01acf235f263 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -96,8 +96,8 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
* Returns whether or not the input pool has been seeded and thus guaranteed
* to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
+ * u16,u32,u64,int,long} family of functions.
*
* Returns: true if the input pool has been seeded.
* false if the input pool has not been seeded.
@@ -119,9 +119,9 @@ static void try_to_generate_entropy(void);
/*
* Wait for the input pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
+ * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
+ * int,long} family of functions. Using any of these functions without first
+ * calling this function forfeits the guarantee of security.
*
* Returns: 0 if the input pool has been seeded.
* -ERESTARTSYS if the function was interrupted by a signal.
@@ -157,6 +157,8 @@ EXPORT_SYMBOL(wait_for_random_bytes);
* There are a few exported interfaces for use by other drivers:
*
* void get_random_bytes(void *buf, size_t len)
+ * u8 get_random_u8()
+ * u16 get_random_u16()
* u32 get_random_u32()
* u64 get_random_u64()
* unsigned int get_random_int()
@@ -164,10 +166,10 @@ EXPORT_SYMBOL(wait_for_random_bytes);
*
* These interfaces will return the requested number of random bytes
* into the given buffer or as a return value. This is equivalent to
- * a read from /dev/urandom. The u32, u64, int, and long family of
- * functions may be higher performance for one-off random integers,
- * because they do a bit of buffering and do not invoke reseeding
- * until the buffer is emptied.
+ * a read from /dev/urandom. The u8, u16, u32, u64, int, and long
+ * family of functions may be higher performance for one-off random
+ * integers, because they do a bit of buffering and do not invoke
+ * reseeding until the buffer is emptied.
*
*********************************************************************/
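/*
 * Editor's sketch (not in the patch): typical one-off uses of the sized
 * helpers listed above. Each call draws from a per-cpu batch, so the
 * narrow variants avoid wasting a full u64 of batched entropy.
 */
static void example_random_users(void)
{
	u8 jitter = get_random_u8();	/* one batched byte */
	u16 sport = get_random_u16();	/* one batched 16-bit value */
	u32 id = get_random_u32();
	u64 cookie = get_random_u64();

	pr_debug("%u %u %u %llu\n", jitter, sport, id, cookie);
}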
@@ -260,25 +262,23 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
}
/*
- * Return whether the crng seed is considered to be sufficiently old
- * that a reseeding is needed. This happens if the last reseeding
- * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
+ * Return the interval until the next reseeding, which is normally
+ * CRNG_RESEED_INTERVAL, but during early boot is an interval
+ * proportional to the uptime.
*/
-static bool crng_has_old_seed(void)
+static unsigned int crng_reseed_interval(void)
{
static bool early_boot = true;
- unsigned long interval = CRNG_RESEED_INTERVAL;
if (unlikely(READ_ONCE(early_boot))) {
time64_t uptime = ktime_get_seconds();
if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
WRITE_ONCE(early_boot, false);
else
- interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
- (unsigned int)uptime / 2 * HZ);
+ return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
}
- return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
+ return CRNG_RESEED_INTERVAL;
}
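/*
 * Editor's note (not in the patch): for an uptime of U seconds the
 * early-boot branch returns max(CRNG_RESEED_START_INTERVAL, U / 2 * HZ),
 * so the interval grows with uptime; once U >= 2 * (CRNG_RESEED_INTERVAL
 * / HZ) the steady CRNG_RESEED_INTERVAL is returned from then on.
 */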
/*
@@ -320,7 +320,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
* If the base_crng is old enough, we reseed, which in turn bumps the
* generation counter that we check below.
*/
- if (unlikely(crng_has_old_seed()))
+ if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
crng_reseed();
local_lock_irqsave(&crngs.lock, flags);
@@ -384,11 +384,11 @@ static void _get_random_bytes(void *buf, size_t len)
}
/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. In order to ensure that the randomness
- * by this function is okay, the function wait_for_random_bytes()
- * should be called and return 0 at least once at any point prior.
+ * This function is the exported kernel interface. It returns some number of
+ * good random numbers, suitable for key generation, seeding TCP sequence
+ * numbers, etc. In order to ensure that the randomness returned by this
+ * function is okay, the function wait_for_random_bytes() should be called and
+ * return 0 at least once at any point prior.
*/
void get_random_bytes(void *buf, size_t len)
{
@@ -506,8 +506,10 @@ type get_random_ ##type(void) \
} \
EXPORT_SYMBOL(get_random_ ##type);
-DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u8)
+DEFINE_BATCHED_ENTROPY(u16)
DEFINE_BATCHED_ENTROPY(u32)
+DEFINE_BATCHED_ENTROPY(u64)
#ifdef CONFIG_SMP
/*
@@ -522,6 +524,8 @@ int __cold random_prepare_cpu(unsigned int cpu)
* randomness.
*/
per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
return 0;
@@ -774,18 +778,13 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio
static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
/*
- * The first collection of entropy occurs at system boot while interrupts
- * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
- * utsname(), and the command line. Depending on the above configuration knob,
- * RDSEED may be considered sufficient for initialization. Note that much
- * earlier setup may already have pushed entropy into the input pool by the
- * time we get here.
+ * This is called extremely early, before timekeeping functionality is
+ * available, but arch randomness is. Interrupts are not yet enabled.
*/
-int __init random_init(const char *command_line)
+void __init random_init_early(const char *command_line)
{
- ktime_t now = ktime_get_real();
- size_t i, longs, arch_bits;
unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
+ size_t i, longs, arch_bits;
#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
@@ -805,34 +804,49 @@ int __init random_init(const char *command_line)
i += longs;
continue;
}
- entropy[0] = random_get_entropy();
- _mix_pool_bytes(entropy, sizeof(*entropy));
arch_bits -= sizeof(*entropy) * 8;
++i;
}
- _mix_pool_bytes(&now, sizeof(now));
- _mix_pool_bytes(utsname(), sizeof(*(utsname())));
+
+ _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
_mix_pool_bytes(command_line, strlen(command_line));
+
+ /* Reseed if already seeded by earlier phases. */
+ if (crng_ready())
+ crng_reseed();
+ else if (trust_cpu)
+ _credit_init_bits(arch_bits);
+}
+
+/*
+ * This is called a little bit after the prior function, and now there is
+ * access to timestamps counters. Interrupts are not yet enabled.
+ */
+void __init random_init(void)
+{
+ unsigned long entropy = random_get_entropy();
+ ktime_t now = ktime_get_real();
+
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
add_latent_entropy();
/*
- * If we were initialized by the bootloader before jump labels are
- * initialized, then we should enable the static branch here, where
+ * If we were initialized by the cpu or bootloader before jump labels
+ * are initialized, then we should enable the static branch here, where
* it's guaranteed that jump labels have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
+ /* Reseed if already seeded by earlier phases. */
if (crng_ready())
crng_reseed();
- else if (trust_cpu)
- _credit_init_bits(arch_bits);
WARN_ON(register_pm_notifier(&pm_notifier));
- WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG "
- "entropy collection will consequently suffer.");
- return 0;
+ WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
+ "entropy collection will consequently suffer.");
}
/*
@@ -866,11 +880,11 @@ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
credit_init_bits(entropy);
/*
- * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
- * we're not yet initialized.
+ * Throttle writing to once every reseed interval, unless we're not yet
+ * initialized or no entropy is credited.
*/
- if (!kthread_should_stop() && crng_ready())
- schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
+ if (!kthread_should_stop() && (crng_ready() || !entropy))
+ schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
@@ -920,20 +934,23 @@ EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
struct fast_pool {
- struct work_struct mix;
unsigned long pool[4];
unsigned long last;
unsigned int count;
+ struct timer_list mix;
};
+static void mix_interrupt_randomness(struct timer_list *work);
+
static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
- .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
- .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};
/*
@@ -975,7 +992,7 @@ int __cold random_online_cpu(unsigned int cpu)
}
#endif
-static void mix_interrupt_randomness(struct work_struct *work)
+static void mix_interrupt_randomness(struct timer_list *work)
{
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
/*
@@ -1006,7 +1023,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
local_irq_enable();
mix_pool_bytes(pool, sizeof(pool));
- credit_init_bits(max(1u, (count & U16_MAX) / 64));
+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
memzero_explicit(pool, sizeof(pool));
}
@@ -1029,10 +1046,11 @@ void add_interrupt_randomness(int irq)
if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
return;
- if (unlikely(!fast_pool->mix.func))
- INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
fast_pool->count |= MIX_INFLIGHT;
- queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
+ if (!timer_pending(&fast_pool->mix)) {
+ fast_pool->mix.expires = jiffies;
+ add_timer_on(&fast_pool->mix, raw_smp_processor_id());
+ }
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1191,7 +1209,7 @@ static void __cold entropy_timer(struct timer_list *timer)
*/
static void __cold try_to_generate_entropy(void)
{
- enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
+ enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
struct entropy_timer_state stack;
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
@@ -1210,7 +1228,7 @@ static void __cold try_to_generate_entropy(void)
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready() && !signal_pending(current)) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies + 1);
+ mod_timer(&stack.timer, jiffies);
mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
schedule();
stack.entropy = random_get_entropy();
@@ -1347,6 +1365,11 @@ static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
+ if (!crng_ready() &&
+ ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
+ (kiocb->ki_filp->f_flags & O_NONBLOCK)))
+ return -EAGAIN;
+
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 40018a73b3cb..bc7b1b4501b3 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -380,7 +380,7 @@ void tpm_add_ppi(struct tpm_chip *chip)
TPM_PPI_FN_VERSION,
NULL, ACPI_TYPE_STRING);
if (obj) {
- strlcpy(chip->ppi_version, obj->string.pointer,
+ strscpy(chip->ppi_version, obj->string.pointer,
sizeof(chip->ppi_version));
ACPI_FREE(obj);
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4f2bb7315b67..4469e7f555e9 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -434,7 +434,7 @@ config ATMEL_TCB_CLKSRC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
- depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on ARCH_ARTPEC || ARCH_EXYNOS || COMPILE_TEST
help
Support for Multi Core Timer controller on Exynos SoCs.
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9ab8221ee3c6..a7ff77550e17 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -44,8 +44,8 @@
#define CNTACR_RWVT BIT(4)
#define CNTACR_RWPT BIT(5)
-#define CNTVCT_LO 0x00
-#define CNTPCT_LO 0x08
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
#define CNTFRQ 0x10
#define CNTP_CVAL_LO 0x20
#define CNTP_CTL 0x2c
@@ -473,6 +473,8 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "ARM erratum 858921",
.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index f29c812b70c9..bfd60093ee1c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -33,7 +33,7 @@
#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
-#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
+#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * (x)))
#define EXYNOS4_MCT_L_MASK (0xffffff00)
#define MCT_L_TCNTB_OFFSET (0x00)
@@ -66,6 +66,8 @@
#define MCT_L0_IRQ 4
/* Max number of IRQ as per DT binding document */
#define MCT_NR_IRQS 20
+/* Max number of local timers */
+#define MCT_NR_LOCAL (MCT_NR_IRQS - MCT_L0_IRQ)
enum {
MCT_INT_SPI,
@@ -233,9 +235,16 @@ static cycles_t exynos4_read_current_timer(void)
}
#endif
-static int __init exynos4_clocksource_init(void)
+static int __init exynos4_clocksource_init(bool frc_shared)
{
- exynos4_mct_frc_start();
+ /*
+ * When the FRC is shared, the main processor should have already
+ * turned it on and we shouldn't be writing to TCON.
+ */
+ if (frc_shared)
+ mct_frc.resume = NULL;
+ else
+ exynos4_mct_frc_start();
#if defined(CONFIG_ARM)
exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
@@ -449,7 +458,6 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- mevt->base = EXYNOS4_MCT_L_BASE(cpu);
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
evt->name = mevt->name;
@@ -520,8 +528,17 @@ static int __init exynos4_timer_resources(struct device_node *np)
return 0;
}
+/**
+ * exynos4_timer_interrupts - initialize MCT interrupts
+ * @np: device node for MCT
+ * @int_type: interrupt type, MCT_INT_PPI or MCT_INT_SPI
+ * @local_idx: array mapping CPU numbers to local timer indices
+ * @nr_local: size of @local_idx array
+ */
static int __init exynos4_timer_interrupts(struct device_node *np,
- unsigned int int_type)
+ unsigned int int_type,
+ const u32 *local_idx,
+ size_t nr_local)
{
int nr_irqs, i, err, cpu;
@@ -554,13 +571,21 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
} else {
for_each_possible_cpu(cpu) {
int mct_irq;
+ unsigned int irq_idx;
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
+ if (cpu >= nr_local) {
+ err = -EINVAL;
+ goto out_irq;
+ }
+
+ irq_idx = MCT_L0_IRQ + local_idx[cpu];
+
pcpu_mevt->evt.irq = -1;
- if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
+ if (irq_idx >= ARRAY_SIZE(mct_irqs))
break;
- mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+ mct_irq = mct_irqs[irq_idx];
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
if (request_irq(mct_irq,
@@ -576,6 +601,17 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
}
}
+ for_each_possible_cpu(cpu) {
+ struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu);
+
+ if (cpu >= nr_local) {
+ err = -EINVAL;
+ goto out_irq;
+ }
+
+ mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]);
+ }
+
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
"clockevents/exynos4/mct_timer:starting",
@@ -605,20 +641,49 @@ out_irq:
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
+ bool frc_shared = of_property_read_bool(np, "samsung,frc-shared");
+ u32 local_idx[MCT_NR_LOCAL] = {0};
+ int nr_local;
int ret;
+ nr_local = of_property_count_u32_elems(np, "samsung,local-timers");
+ if (nr_local == 0)
+ return -EINVAL;
+ if (nr_local > 0) {
+ if (nr_local > ARRAY_SIZE(local_idx))
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(np, "samsung,local-timers",
+ local_idx, nr_local);
+ if (ret)
+ return ret;
+ } else {
+ int i;
+
+ nr_local = ARRAY_SIZE(local_idx);
+ for (i = 0; i < nr_local; i++)
+ local_idx[i] = i;
+ }
+
ret = exynos4_timer_resources(np);
if (ret)
return ret;
- ret = exynos4_timer_interrupts(np, int_type);
+ ret = exynos4_timer_interrupts(np, int_type, local_idx, nr_local);
if (ret)
return ret;
- ret = exynos4_clocksource_init();
+ ret = exynos4_clocksource_init(frc_shared);
if (ret)
return ret;
+ /*
+ * When the FRC is shared with a main processor, this secondary
+ * processor cannot use the global comparator.
+ */
+ if (frc_shared)
+ return ret;
+
return exynos4_clockevent_init();
}
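/*
 * Editor's sketch (not in the patch): a device tree fragment using the
 * "samsung,local-timers" and "samsung,frc-shared" properties parsed
 * above. The compatible string and index values are illustrative
 * assumptions, mapping CPU0 and CPU1 to local timers 2 and 3:
 *
 *	timer@10050000 {
 *		compatible = "samsung,exynos4210-mct";
 *		samsung,frc-shared;
 *		samsung,local-timers = <2 3>;
 *	};
 */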
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 21d1392637b8..8da972dc1713 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -224,7 +224,7 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#ifdef CONFIG_ARCH_R9A07G044
+#ifdef CONFIG_ARCH_RZG2L
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
index 8b38b3212388..fe4fa8d7b3f1 100644
--- a/drivers/clocksource/timer-gxp.c
+++ b/drivers/clocksource/timer-gxp.c
@@ -171,6 +171,7 @@ static int gxp_timer_probe(struct platform_device *pdev)
{
struct platform_device *gxp_watchdog_device;
struct device *dev = &pdev->dev;
+ int ret;
if (!gxp_timer) {
pr_err("Gxp Timer not initialized, cannot create watchdog");
@@ -187,7 +188,11 @@ static int gxp_timer_probe(struct platform_device *pdev)
gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
gxp_watchdog_device->dev.parent = dev;
- return platform_device_add(gxp_watchdog_device);
+ ret = platform_device_add(gxp_watchdog_device);
+ if (ret)
+ platform_device_put(gxp_watchdog_device);
+
+ return ret;
}
static const struct of_device_id gxp_timer_of_match[] = {
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 523e37662a6e..5a7a951c4efc 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -134,8 +134,10 @@ static int __init sysctr_timer_init(struct device_node *np)
if (ret)
return ret;
- /* system counter clock is divided by 3 internally */
- to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ if (!of_property_read_bool(np, "nxp,no-divider")) {
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ }
sys_ctr_base = timer_of_base(&to_sysctr);
cmpcr = readl(sys_ctr_base + CMPCR);
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 94dc6e42e983..e5a70aa1deb4 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -26,6 +26,7 @@
#define TIMER_IRQ_EN_REG 0x00
#define TIMER_IRQ_EN(val) BIT(val)
#define TIMER_IRQ_ST_REG 0x04
+#define TIMER_IRQ_CLEAR(val) BIT(val)
#define TIMER_CTL_REG(val) (0x10 * val + 0x10)
#define TIMER_CTL_ENABLE BIT(0)
#define TIMER_CTL_RELOAD BIT(1)
@@ -123,7 +124,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
static void sun4i_timer_clear_interrupt(void __iomem *base)
{
- writel(TIMER_IRQ_EN(0), base + TIMER_IRQ_ST_REG);
+ writel(TIMER_IRQ_CLEAR(0), base + TIMER_IRQ_ST_REG);
}
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 469f7c91564b..cad29ded3a48 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -33,6 +33,116 @@
#include <clocksource/timer-ti-dm.h>
+/*
+ * timer errata flags
+ *
+ * Errata i103/i767 impact all OMAP3/4/5 devices including AM33xx. These
+ * errata prevent us from using posted mode on these devices, unless the
+ * timer counter register is never read. For more details please refer to
+ * the OMAP3/4/5 errata documents.
+ */
+#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
+
+/* posted mode types */
+#define OMAP_TIMER_NONPOSTED 0x00
+#define OMAP_TIMER_POSTED 0x01
+
+/* register offsets with the write pending bit encoded */
+#define WPSHIFT 16
+
+#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
+ | (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
+ | (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
+ | (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
+ | (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
+ | (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
+ | (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
+ | (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
+ | (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
+
+struct timer_regs {
+ u32 ocp_cfg;
+ u32 tidr;
+ u32 tier;
+ u32 twer;
+ u32 tclr;
+ u32 tcrr;
+ u32 tldr;
+ u32 ttrg;
+ u32 twps;
+ u32 tmar;
+ u32 tcar1;
+ u32 tsicr;
+ u32 tcar2;
+ u32 tpir;
+ u32 tnir;
+ u32 tcvr;
+ u32 tocr;
+ u32 towr;
+};
+
+struct dmtimer {
+ struct omap_dm_timer cookie;
+ int id;
+ int irq;
+ struct clk *fclk;
+
+ void __iomem *io_base;
+ int irq_stat; /* TISR/IRQSTATUS interrupt status */
+ int irq_ena; /* irq enable */
+ int irq_dis; /* irq disable, only on v2 ip */
+ void __iomem *pend; /* write pending */
+ void __iomem *func_base; /* function register base */
+
+ atomic_t enabled;
+ unsigned long rate;
+ unsigned reserved:1;
+ unsigned posted:1;
+ unsigned omap1:1;
+ struct timer_regs context;
+ int revision;
+ u32 capability;
+ u32 errata;
+ struct platform_device *pdev;
+ struct list_head node;
+ struct notifier_block nb;
+};
+
static u32 omap_reserved_systimers;
static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);
@@ -44,27 +154,56 @@ enum {
REQUEST_BY_NODE,
};
-static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
- int posted)
+/**
+ * dmtimer_read - read timer registers in posted and non-posted mode
+ * @timer: timer over which the read operation is performed
+ * @reg: lowest byte holds the register offset
+ *
+ * The write-pending bit for the register is encoded in reg. Note that in
+ * posted mode the write-pending bit must be checked; otherwise a read
+ * issued before an earlier write has completed will return incorrect data.
+ */
+static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ u16 wp, offset;
+
+ wp = reg >> WPSHIFT;
+ offset = reg & 0xff;
+
+ /* Wait for a possible write pending bit in posted mode */
+ if (wp && timer->posted)
+ while (readl_relaxed(timer->pend) & wp)
cpu_relax();
- return readl_relaxed(timer->func_base + (reg & 0xff));
+ return readl_relaxed(timer->func_base + offset);
}
-static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
- u32 reg, u32 val, int posted)
+/**
+ * dmtimer_write - write timer registers in posted and non-posted mode
+ * @timer: timer over which the write operation is performed
+ * @reg: lowest byte holds the register offset
+ * @val: data to write into the register
+ *
+ * The write-pending bit for the register is encoded in reg. Note that in
+ * posted mode the write-pending bit must be checked; otherwise a write to
+ * a register that still has a write pending will be lost.
+ */
+static inline void dmtimer_write(struct dmtimer *timer, u32 reg, u32 val)
{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ u16 wp, offset;
+
+ wp = reg >> WPSHIFT;
+ offset = reg & 0xff;
+
+ /* Wait for a possible write pending bit in posted mode */
+ if (wp && timer->posted)
+ while (readl_relaxed(timer->pend) & wp)
cpu_relax();
- writel_relaxed(val, timer->func_base + (reg & 0xff));
+ writel_relaxed(val, timer->func_base + offset);
}
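/*
 * Editor's worked example (not in the patch): OMAP_TIMER_CTRL_REG is
 * defined above as _OMAP_TIMER_CTRL_OFFSET | (WP_TCLR << WPSHIFT), so
 * dmtimer_write() decodes wp == WP_TCLR and offset ==
 * _OMAP_TIMER_CTRL_OFFSET, and in posted mode spins on the write-pending
 * register until the TCLR pending bit clears before issuing the access.
 */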
-static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
+static inline void __omap_dm_timer_init_regs(struct dmtimer *timer)
{
u32 tidr;
@@ -72,16 +211,16 @@ static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
tidr = readl_relaxed(timer->io_base);
if (!(tidr >> 16)) {
timer->revision = 1;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
+ timer->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_dis = OMAP_TIMER_V1_INT_EN_OFFSET;
timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
timer->func_base = timer->io_base;
} else {
timer->revision = 2;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
+ timer->irq_stat = OMAP_TIMER_V2_IRQSTATUS - OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET - OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->irq_dis = OMAP_TIMER_V2_IRQENABLE_CLR - OMAP_TIMER_V2_FUNC_OFFSET;
timer->pend = timer->io_base +
_OMAP_TIMER_WRITE_PEND_OFFSET +
OMAP_TIMER_V2_FUNC_OFFSET;
@@ -99,35 +238,34 @@ static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
* complete. Enabling this feature can improve performance for writing to the
* timer registers.
*/
-static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
{
if (timer->posted)
return;
if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
timer->posted = OMAP_TIMER_NONPOSTED;
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0);
return;
}
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
- OMAP_TIMER_CTRL_POSTED, 0);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED);
timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
timer->posted = OMAP_TIMER_POSTED;
}
-static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
- int posted, unsigned long rate)
+static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ unsigned long rate)
{
u32 l;
- l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
l &= ~0x1;
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
#ifdef CONFIG_ARCH_OMAP2PLUS
/* Readback to make sure write has completed */
- __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
/*
* Wait for functional clock period x 3.5 to make sure that
* timer is stopped
@@ -137,104 +275,59 @@ static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
}
/* Ack possibly pending interrupt */
- writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
+ dmtimer_write(timer, timer->irq_stat, OMAP_TIMER_INT_OVERFLOW);
}
-static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
- unsigned int value)
+static inline void __omap_dm_timer_int_enable(struct dmtimer *timer,
+ unsigned int value)
{
- writel_relaxed(value, timer->irq_ena);
- __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+ dmtimer_write(timer, timer->irq_ena, value);
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
}
static inline unsigned int
-__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
+__omap_dm_timer_read_counter(struct dmtimer *timer)
{
- return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
+ return dmtimer_read(timer, OMAP_TIMER_COUNTER_REG);
}
-static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
+static inline void __omap_dm_timer_write_status(struct dmtimer *timer,
unsigned int value)
{
- writel_relaxed(value, timer->irq_stat);
+ dmtimer_write(timer, timer->irq_stat, value);
}
-/**
- * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
- * @timer: timer pointer over which read operation to perform
- * @reg: lowest byte holds the register offset
- *
- * The posted mode bit is encoded in reg. Note that in posted mode write
- * pending bit must be checked. Otherwise a read of a non completed write
- * will produce an error.
- */
-static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
+static void omap_timer_restore_context(struct dmtimer *timer)
{
- WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
- return __omap_dm_timer_read(timer, reg, timer->posted);
+ dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, timer->context.ocp_cfg);
+
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, timer->context.twer);
+ dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, timer->context.tcrr);
+ dmtimer_write(timer, OMAP_TIMER_LOAD_REG, timer->context.tldr);
+ dmtimer_write(timer, OMAP_TIMER_MATCH_REG, timer->context.tmar);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, timer->context.tsicr);
+ dmtimer_write(timer, timer->irq_ena, timer->context.tier);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, timer->context.tclr);
}
-/**
- * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
- * @timer: timer pointer over which write operation is to perform
- * @reg: lowest byte holds the register offset
- * @value: data to write into the register
- *
- * The posted mode bit is encoded in reg. Note that in posted mode the write
- * pending bit must be checked. Otherwise a write on a register which has a
- * pending write will be lost.
- */
-static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
- u32 value)
+static void omap_timer_save_context(struct dmtimer *timer)
{
- WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
- __omap_dm_timer_write(timer, reg, value, timer->posted);
-}
-
-static void omap_timer_restore_context(struct omap_dm_timer *timer)
-{
- __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
- timer->context.ocp_cfg, 0);
-
- omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
- timer->context.twer);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
- timer->context.tcrr);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
- timer->context.tldr);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
- timer->context.tmar);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
- timer->context.tsicr);
- writel_relaxed(timer->context.tier, timer->irq_ena);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
- timer->context.tclr);
-}
-
-static void omap_timer_save_context(struct omap_dm_timer *timer)
-{
- timer->context.ocp_cfg =
- __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
-
- timer->context.tclr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- timer->context.twer =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG);
- timer->context.tldr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_LOAD_REG);
- timer->context.tmar =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_MATCH_REG);
- timer->context.tier = readl_relaxed(timer->irq_ena);
- timer->context.tsicr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_IF_CTRL_REG);
+ timer->context.ocp_cfg = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
+
+ timer->context.tclr = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+ timer->context.twer = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG);
+ timer->context.tldr = dmtimer_read(timer, OMAP_TIMER_LOAD_REG);
+ timer->context.tmar = dmtimer_read(timer, OMAP_TIMER_MATCH_REG);
+ timer->context.tier = dmtimer_read(timer, timer->irq_ena);
+ timer->context.tsicr = dmtimer_read(timer, OMAP_TIMER_IF_CTRL_REG);
}
static int omap_timer_context_notifier(struct notifier_block *nb,
unsigned long cmd, void *v)
{
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
- timer = container_of(nb, struct omap_dm_timer, nb);
+ timer = container_of(nb, struct dmtimer, nb);
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
@@ -256,18 +349,17 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int omap_dm_timer_reset(struct omap_dm_timer *timer)
+static int omap_dm_timer_reset(struct dmtimer *timer)
{
u32 l, timeout = 100000;
if (timer->revision != 1)
return -EINVAL;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
do {
- l = __omap_dm_timer_read(timer,
- OMAP_TIMER_V1_SYS_STAT_OFFSET, 0);
+ l = dmtimer_read(timer, OMAP_TIMER_V1_SYS_STAT_OFFSET);
} while (!l && timeout--);
if (!timeout) {
@@ -276,22 +368,38 @@ static int omap_dm_timer_reset(struct omap_dm_timer *timer)
}
/* Configure timer for smart-idle mode */
- l = __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
+ l = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
l |= 0x2 << 0x3;
- __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l, 0);
+ dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l);
timer->posted = 0;
return 0;
}
-static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
+/*
+ * Functions exposed to PWM and remoteproc drivers via platform_data.
+ * Do not use these in the driver itself; they will be deprecated and
+ * replaced by Linux generic framework functions such as chained
+ * interrupts and the clock framework.
+ */
+static struct dmtimer *to_dmtimer(struct omap_dm_timer *cookie)
+{
+ if (!cookie)
+ return NULL;
+
+ return container_of(cookie, struct dmtimer, cookie);
+}
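A hedged sketch of the cookie round trip: external users only ever hold the embedded struct omap_dm_timer, and each exported op recovers the private state with to_dmtimer(). Clients actually reach these functions through the dmtimer_platform_data hooks; example_client() and the direct calls below are purely illustrative.

static int example_client(void)
{
	struct omap_dm_timer *cookie;

	cookie = omap_dm_timer_request();	/* returns &timer->cookie */
	if (!cookie)
		return -ENODEV;

	/* internally: container_of(cookie, struct dmtimer, cookie) */
	omap_dm_timer_free(cookie);

	return 0;
}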
+
+static int omap_dm_timer_set_source(struct omap_dm_timer *cookie, int source)
{
int ret;
const char *parent_name;
struct clk *parent;
struct dmtimer_platform_data *pdata;
+ struct dmtimer *timer;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer) || IS_ERR(timer->fclk))
return -EINVAL;
@@ -316,7 +424,7 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
* use the clock framework to set the parent clock. To be removed
* once OMAP1 migrated to using clock framework for dmtimers
*/
- if (pdata && pdata->set_timer_src)
+ if (timer->omap1 && pdata && pdata->set_timer_src)
return pdata->set_timer_src(timer->pdev, source);
#if defined(CONFIG_COMMON_CLK)
@@ -341,44 +449,44 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
return ret;
}
-static void omap_dm_timer_enable(struct omap_dm_timer *timer)
+static void omap_dm_timer_enable(struct omap_dm_timer *cookie)
{
- pm_runtime_get_sync(&timer->pdev->dev);
+ struct dmtimer *timer = to_dmtimer(cookie);
+ struct device *dev = &timer->pdev->dev;
+ int rc;
+
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ dev_err(dev, "could not enable timer\n");
}
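For reference, pm_runtime_resume_and_get() from <linux/pm_runtime.h> is roughly the wrapper below, which is why this conversion can also drop the pm_runtime_put_noidle() calls from the error paths later in the patch:

static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	/* take a usage count and resume; drop the count again on failure */
	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}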
-static void omap_dm_timer_disable(struct omap_dm_timer *timer)
+static void omap_dm_timer_disable(struct omap_dm_timer *cookie)
{
- pm_runtime_put_sync(&timer->pdev->dev);
+ struct dmtimer *timer = to_dmtimer(cookie);
+ struct device *dev = &timer->pdev->dev;
+
+ pm_runtime_put_sync(dev);
}
-static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+static int omap_dm_timer_prepare(struct dmtimer *timer)
{
+ struct device *dev = &timer->pdev->dev;
int rc;
- /*
- * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
- * do not call clk_get() for these devices.
- */
- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
- timer->fclk = clk_get(&timer->pdev->dev, "fck");
- if (WARN_ON_ONCE(IS_ERR(timer->fclk))) {
- dev_err(&timer->pdev->dev, ": No fclk handle.\n");
- return -EINVAL;
- }
- }
-
- omap_dm_timer_enable(timer);
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
rc = omap_dm_timer_reset(timer);
if (rc) {
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
return rc;
}
}
__omap_dm_timer_enable_posted(timer);
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -388,19 +496,9 @@ static inline u32 omap_dm_timer_reserved_systimer(int id)
return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
}
-int omap_dm_timer_reserve_systimer(int id)
+static struct dmtimer *_omap_dm_timer_request(int req_type, void *data)
{
- if (omap_dm_timer_reserved_systimer(id))
- return -ENODEV;
-
- omap_reserved_systimers |= (1 << (id - 1));
-
- return 0;
-}
-
-static struct omap_dm_timer *_omap_dm_timer_request(int req_type, void *data)
-{
- struct omap_dm_timer *timer = NULL, *t;
+ struct dmtimer *timer = NULL, *t;
struct device_node *np = NULL;
unsigned long flags;
u32 cap = 0;
@@ -484,11 +582,19 @@ found:
static struct omap_dm_timer *omap_dm_timer_request(void)
{
- return _omap_dm_timer_request(REQUEST_ANY, NULL);
+ struct dmtimer *timer;
+
+ timer = _omap_dm_timer_request(REQUEST_ANY, NULL);
+ if (!timer)
+ return NULL;
+
+ return &timer->cookie;
}
static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
+ struct dmtimer *timer;
+
/* Requesting timer by ID is not supported when device tree is used */
if (of_have_populated_dt()) {
pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
@@ -496,21 +602,11 @@ static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
return NULL;
}
- return _omap_dm_timer_request(REQUEST_BY_ID, &id);
-}
+ timer = _omap_dm_timer_request(REQUEST_BY_ID, &id);
+ if (!timer)
+ return NULL;
-/**
- * omap_dm_timer_request_by_cap - Request a timer by capability
- * @cap: Bit mask of capabilities to match
- *
- * Find a timer based upon capabilities bit mask. Callers of this function
- * should use the definitions found in the plat/dmtimer.h file under the
- * comment "timer capabilities used in hwmod database". Returns pointer to
- * timer handle on success and a NULL pointer on failure.
- */
-struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
-{
- return _omap_dm_timer_request(REQUEST_BY_CAP, &cap);
+ return &timer->cookie;
}
/**
@@ -522,26 +618,34 @@ struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
*/
static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
{
+ struct dmtimer *timer;
+
if (!np)
return NULL;
- return _omap_dm_timer_request(REQUEST_BY_NODE, np);
+ timer = _omap_dm_timer_request(REQUEST_BY_NODE, np);
+ if (!timer)
+ return NULL;
+
+ return &timer->cookie;
}
-static int omap_dm_timer_free(struct omap_dm_timer *timer)
+static int omap_dm_timer_free(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- clk_put(timer->fclk);
-
WARN_ON(!timer->reserved);
timer->reserved = 0;
return 0;
}
-int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
+int omap_dm_timer_get_irq(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer = to_dmtimer(cookie);
if (timer)
return timer->irq;
return -EINVAL;
@@ -550,7 +654,7 @@ int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
#if defined(CONFIG_ARCH_OMAP1)
#include <linux/soc/ti/omap1-io.h>
-static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
return NULL;
}
@@ -562,7 +666,7 @@ static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
int i = 0;
- struct omap_dm_timer *timer = NULL;
+ struct dmtimer *timer = NULL;
unsigned long flags;
/* If ARMXOR cannot be idled this function call is unnecessary */
@@ -574,7 +678,7 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
list_for_each_entry(timer, &omap_timer_list, node) {
u32 l;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
@@ -590,8 +694,10 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
#else
-static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer = to_dmtimer(cookie);
+
if (timer && !IS_ERR(timer->fclk))
return timer->fclk;
return NULL;
@@ -606,95 +712,125 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
#endif
-int omap_dm_timer_trigger(struct omap_dm_timer *timer)
-{
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
- return -EINVAL;
- }
-
- omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
- return 0;
-}
-
-static int omap_dm_timer_start(struct omap_dm_timer *timer)
+static int omap_dm_timer_start(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (!(l & OMAP_TIMER_CTRL_ST)) {
l |= OMAP_TIMER_CTRL_ST;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
}
return 0;
}
-static int omap_dm_timer_stop(struct omap_dm_timer *timer)
+static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
unsigned long rate = 0;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET))
+ dev = &timer->pdev->dev;
+
+ if (!timer->omap1)
rate = clk_get_rate(timer->fclk);
- __omap_dm_timer_stop(timer, timer->posted, rate);
+ __omap_dm_timer_stop(timer, rate);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_load(struct omap_dm_timer *timer,
+static int omap_dm_timer_set_load(struct omap_dm_timer *cookie,
unsigned int load)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ dmtimer_write(timer, OMAP_TIMER_LOAD_REG, load);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
unsigned int match)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (enable)
l |= OMAP_TIMER_CTRL_CE;
else
l &= ~OMAP_TIMER_CTRL_CE;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_MATCH_REG, match);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
if (def_on)
@@ -704,57 +840,86 @@ static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
l |= trigger << 10;
if (autoreload)
l |= OMAP_TIMER_CTRL_AR;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *timer)
+static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- omap_dm_timer_disable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ pm_runtime_put_sync(dev);
return l;
}
-static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer,
- int prescaler)
+static int omap_dm_timer_set_prescaler(struct omap_dm_timer *cookie,
+ int prescaler)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
if (prescaler >= 0) {
l |= OMAP_TIMER_CTRL_PRE;
l |= prescaler << 2;
}
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
unsigned int value)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
__omap_dm_timer_int_enable(timer, value);
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
+
return 0;
}
@@ -765,42 +930,55 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
*
* Disables the specified timer interrupts for a timer.
*/
-static int omap_dm_timer_set_int_disable(struct omap_dm_timer *timer, u32 mask)
+static int omap_dm_timer_set_int_disable(struct omap_dm_timer *cookie, u32 mask)
{
+ struct dmtimer *timer;
+ struct device *dev;
u32 l = mask;
+ int rc;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
if (timer->revision == 1)
- l = readl_relaxed(timer->irq_ena) & ~mask;
+ l = dmtimer_read(timer, timer->irq_ena) & ~mask;
+
+ dmtimer_write(timer, timer->irq_dis, l);
+ l = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
- writel_relaxed(l, timer->irq_dis);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
+static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
unsigned int l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return 0;
}
- l = readl_relaxed(timer->irq_stat);
+ l = dmtimer_read(timer, timer->irq_stat);
return l;
}
-static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
+static int omap_dm_timer_write_status(struct omap_dm_timer *cookie, unsigned int value)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled)))
return -EINVAL;
@@ -809,49 +987,39 @@ static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int
return 0;
}
-static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
+static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not iavailable or enabled.\n", __func__);
return 0;
}
- return __omap_dm_timer_read_counter(timer, timer->posted);
+ return __omap_dm_timer_read_counter(timer);
}
-static int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
+static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+ dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
-int omap_dm_timers_active(void)
-{
- struct omap_dm_timer *timer;
-
- list_for_each_entry(timer, &omap_timer_list, node) {
- if (!timer->reserved)
- continue;
-
- if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
- OMAP_TIMER_CTRL_ST) {
- return 1;
- }
- }
- return 0;
-}
-
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
- struct omap_dm_timer *timer = dev_get_drvdata(dev);
+ struct dmtimer *timer = dev_get_drvdata(dev);
atomic_set(&timer->enabled, 0);
@@ -865,7 +1033,7 @@ static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
{
- struct omap_dm_timer *timer = dev_get_drvdata(dev);
+ struct dmtimer *timer = dev_get_drvdata(dev);
if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
omap_timer_restore_context(timer);
@@ -892,7 +1060,7 @@ static const struct of_device_id omap_timer_match[];
static int omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
struct device *dev = &pdev->dev;
const struct dmtimer_platform_data *pdata;
int ret;
@@ -916,7 +1084,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
if (timer->irq < 0)
return timer->irq;
- timer->fclk = ERR_PTR(-ENODEV);
timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
@@ -938,6 +1105,17 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
}
+ timer->omap1 = !!(timer->capability & OMAP_TIMER_NEEDS_RESET);
+
+ /* OMAP1 devices do not yet use the clock framework for dmtimers */
+ if (!timer->omap1) {
+ timer->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(timer->fclk))
+ return PTR_ERR(timer->fclk);
+ } else {
+ timer->fclk = ERR_PTR(-ENODEV);
+ }
+
if (!(timer->capability & OMAP_TIMER_ALWON)) {
timer->nb.notifier_call = omap_timer_context_notifier;
cpu_pm_register_notifier(&timer->nb);
@@ -950,11 +1128,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
if (!timer->reserved) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
__func__);
- goto err_get_sync;
+ goto err_disable;
}
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
@@ -969,8 +1147,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return 0;
-err_get_sync:
- pm_runtime_put_noidle(dev);
+err_disable:
pm_runtime_disable(dev);
return ret;
}
@@ -985,7 +1162,7 @@ err_get_sync:
*/
static int omap_dm_timer_remove(struct platform_device *pdev)
{
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
unsigned long flags;
int ret = -EINVAL;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3e6aa319920b..55e75fbb658e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -802,9 +802,7 @@ source "drivers/crypto/amlogic/Kconfig"
config CRYPTO_DEV_SA2UL
tristate "Support for TI security accelerator"
depends on ARCH_K3 || COMPILE_TEST
- select ARM64_CRYPTO
select CRYPTO_AES
- select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_SHA1
@@ -818,5 +816,6 @@ config CRYPTO_DEV_SA2UL
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/keembay/Kconfig"
+source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f81703a86b98..116de173a66c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 44b8fc4b786d..006e40133c28 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
-static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v)
{
unsigned int i;
@@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun4i_ss_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun4i_ss_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs);
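For context, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates essentially the boilerplate removed above from the *_show() function; a simplified sketch of the expansion for this name:

static int sun4i_ss_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sun4i_ss_debugfs_show, inode->i_private);
}

static const struct file_operations sun4i_ss_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= sun4i_ss_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};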
/*
* Power management strategy: The device is suspended unless a TFM exists for
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 19cd2e52f89d..c4b0a8b58842 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
goto err_dst;
}
- err = pm_runtime_get_sync(ce->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
goto err_pm;
- }
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e79514fce731..af017a087ebf 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -177,7 +177,7 @@ static int meson_cipher(struct skcipher_request *areq)
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
- if (nr_sgs < 0) {
+ if (!nr_sgs) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
@@ -186,14 +186,14 @@ static int meson_cipher(struct skcipher_request *areq)
} else {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
- if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
+ if (!nr_sgs || nr_sgs > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
- if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
+ if (!nr_sgd || nr_sgd > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
err = -EINVAL;
goto theend;
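The rationale for the converted checks above, with a minimal sketch: dma_map_sg() returns the number of mapped DMA segments (possibly fewer than the nents passed in, after coalescing) or 0 on failure; it never returns a negative value, so the old "< 0" tests were dead code.

/* General pattern (sketch): only a zero return signals failure. */
int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

if (!mapped)
	return -EINVAL;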
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
new file mode 100644
index 000000000000..ae2710ae8d8f
--- /dev/null
+++ b/drivers/crypto/aspeed/Kconfig
@@ -0,0 +1,48 @@
+config CRYPTO_DEV_ASPEED
+ tristate "Support for Aspeed cryptographic engine driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select CRYPTO_ENGINE
+ help
+ The Hash and Crypto Engine (HACE) is designed to accelerate the
+ throughput of hash data digests, encryption and decryption.
+
+ Select y here to enable support for the cryptographic driver
+ available on Aspeed SoCs.
+
+config CRYPTO_DEV_ASPEED_DEBUG
+ bool "Enable Aspeed crypto debug messages"
+ depends on CRYPTO_DEV_ASPEED
+ help
+ Enable this option to print Aspeed crypto debugging
+ messages.
+ Avoid enabling this option in production builds to
+ minimize the driver's timing overhead.
+
+config CRYPTO_DEV_ASPEED_HACE_HASH
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) hash"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ hash driver.
+ Supports multiple message digest standards: SHA-1, SHA-224,
+ SHA-256, SHA-384 and SHA-512, plus their HMAC variants.
+
+config CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CFB
+ select CRYPTO_OFB
+ select CRYPTO_CTR
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ crypto driver.
+ Supports AES/DES symmetric-key encryption and decryption
+ with ECB/CBC/CFB/OFB/CTR options.
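A possible .config fragment enabling the full HACE stack described above (the top-level symbol is tristate, so =m also works; the two sub-options are bool):

CONFIG_CRYPTO_DEV_ASPEED=y
CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH=y
CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO=y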
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
new file mode 100644
index 000000000000..a0ed40ddaad1
--- /dev/null
+++ b/drivers/crypto/aspeed/Makefile
@@ -0,0 +1,7 @@
+hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o
+hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o
+
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
+aspeed_crypto-objs := aspeed-hace.o \
+ $(hace-hash-y) \
+ $(hace-crypto-y)
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
new file mode 100644
index 000000000000..ef73b0028b4d
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+static int aspeed_crypto_do_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+
+ if (rctx->enc_cmd & HACE_CMD_ENCRYPT)
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+
+ return err;
+}
+
+static bool aspeed_crypto_need_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+
+ if (areq->cryptlen == 0)
+ return true;
+
+ if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) &&
+ !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE))
+ return true;
+
+ if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) &&
+ !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE))
+ return true;
+
+ return false;
+}
+
+static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct skcipher_request *req)
+{
+ if (hace_dev->version == AST2500_VERSION &&
+ aspeed_crypto_need_fallback(req)) {
+ CIPHER_DBG(hace_dev, "SW fallback\n");
+ return aspeed_crypto_do_fallback(req);
+ }
+
+ return crypto_transfer_skcipher_request_to_engine(
+ hace_dev->crypt_engine_crypto, req);
+}
+
+static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct aspeed_engine_crypto *crypto_engine;
+ int rc;
+
+ crypto_engine = &hace_dev->crypto_engine;
+ crypto_engine->req = req;
+ crypto_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ rc = ctx->start(hace_dev);
+
+ if (rc != -EINPROGRESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(req->iv, crypto_engine->cipher_ctx +
+ DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ else
+ memcpy(req->iv, crypto_engine->cipher_ctx,
+ AES_BLOCK_SIZE);
+ }
+
+ crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req,
+ err);
+
+ return err;
+}
+
+static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct device *dev = hace_dev->dev;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE);
+ }
+
+ return aspeed_sk_complete(hace_dev, 0);
+}
+
+static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *out_sg;
+ int nbytes = 0;
+ int rc = 0;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ out_sg = req->dst;
+
+ /* Copy output buffer to dst scatter-gather lists */
+ nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ rc = -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_out_sg", rctx->dst_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ return aspeed_sk_complete(hace_dev, rc);
+}
+
+static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *in_sg;
+ int nbytes;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ in_sg = req->src;
+
+ nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_in_sg", rctx->src_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ return -EINVAL;
+ }
+
+ crypto_engine->resume = aspeed_sk_transfer;
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+}
+
+static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_sg_list *src_list, *dst_list;
+ dma_addr_t src_dma_addr, dst_dma_addr;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *s;
+ int src_sg_len;
+ int dst_sg_len;
+ int total, i;
+ int rc;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
+ HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;
+
+ /* BIDIRECTIONAL */
+ if (req->dst == req->src) {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_BIDIRECTIONAL);
+ dst_sg_len = src_sg_len;
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ } else {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_TO_DEVICE);
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ dst_sg_len = dma_map_sg(hace_dev->dev, req->dst,
+ rctx->dst_nents, DMA_FROM_DEVICE);
+ if (!dst_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() dst error\n");
+ rc = -EINVAL;
+ goto free_req_src;
+ }
+ }
+
+ src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr;
+ src_dma_addr = crypto_engine->cipher_dma_addr;
+ total = req->cryptlen;
+
+ for_each_sg(req->src, s, src_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len) {
+ total -= len;
+ } else {
+ /* last sg list: BIT(31) marks the final segment */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
+ if (req->dst == req->src) {
+ dst_list = src_list;
+ dst_dma_addr = src_dma_addr;
+
+ } else {
+ dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr;
+ dst_dma_addr = crypto_engine->dst_sg_dma_addr;
+ total = req->cryptlen;
+
+ for_each_sg(req->dst, s, dst_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len) {
+ total -= len;
+ } else {
+ /* last sg list: BIT(31) marks the final segment */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ dst_list[i].phy_addr = cpu_to_le32(phy_addr);
+ dst_list[i].len = cpu_to_le32(len);
+ }
+
+ dst_list[dst_sg_len].phy_addr = 0;
+ dst_list[dst_sg_len].len = 0;
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
+ crypto_engine->resume = aspeed_sk_transfer_sg;
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+
+free_req:
+ if (req->dst == req->src) {
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_BIDIRECTIONAL);
+
+ } else {
+ dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_TO_DEVICE);
+ }
+
+ return rc;
+
+free_req_src:
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct crypto_skcipher *cipher;
+ struct aspeed_cipher_ctx *ctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ cipher = crypto_skcipher_reqtfm(req);
+ ctx = crypto_skcipher_ctx(cipher);
+
+ /* enable interrupt */
+ rctx->enc_cmd |= HACE_CMD_ISR_EN;
+
+ rctx->dst_nents = sg_nents(req->dst);
+ rctx->src_nents = sg_nents(req->src);
+
+ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma,
+ ASPEED_HACE_CONTEXT);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE,
+ req->iv, DES_BLOCK_SIZE);
+ else
+ memcpy(crypto_engine->cipher_ctx, req->iv,
+ AES_BLOCK_SIZE);
+ }
+
+ if (hace_dev->version == AST2600_VERSION) {
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len);
+
+ return aspeed_sk_start_sg(hace_dev);
+ }
+
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH);
+
+ return aspeed_sk_start(hace_dev);
+}
+
+static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE |
+ HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ int rc;
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", keylen);
+
+ if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+ dev_warn(hace_dev->dev, "invalid keylen: %d bits\n", keylen);
+ return -EINVAL;
+ }
+
+ if (keylen == DES_KEY_SIZE) {
+ rc = crypto_des_verify_key(tfm, key);
+ if (rc)
+ return rc;
+
+ } else if (keylen == DES3_EDE_KEY_SIZE) {
+ rc = crypto_des3_ede_verify_key(tfm, key);
+ if (rc)
+ return rc;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_des_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s\n",
+ (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt");
+
+ cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cmd |= HACE_CMD_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cmd |= HACE_CMD_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cmd |= HACE_CMD_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct crypto_aes_ctx gen_aes_key;
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ if (ctx->hace_dev->version == AST2500_VERSION) {
+ aes_expandkey(&gen_aes_key, key, keylen);
+ memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH);
+
+ } else {
+ memcpy(ctx->key, key, keylen);
+ }
+
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct aspeed_hace_alg *crypto_alg;
+
+ crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+ ctx->hace_dev = crypto_alg->hace_dev;
+ ctx->start = aspeed_hace_skcipher_trigger;
+
+ CIPHER_DBG(ctx->hace_dev, "%s\n", name);
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm));
+
+ ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+
+ CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base));
+ crypto_free_skcipher(ctx->fallback_tfm);
+}
+
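As a usage sketch, once the algorithms below are registered a kernel user selects the HACE implementation through the normal crypto API by algorithm name; cra_priority 300 normally makes it win over the generic software ciphers. example_use_hace_cbc_aes() is hypothetical.

static int example_use_hace_cbc_aes(void)
{
	struct crypto_skcipher *tfm;

	/* resolves to "aspeed-cbc-aes" when the HACE driver is loaded */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_skcipher(tfm);

	return 0;
}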
+static struct aspeed_hace_alg aspeed_crypto_algs[] = {
+ {
+ .alg.skcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ecb_encrypt,
+ .decrypt = aspeed_aes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aspeed-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cbc_encrypt,
+ .decrypt = aspeed_aes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aspeed-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cfb_encrypt,
+ .decrypt = aspeed_aes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "aspeed-cfb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ofb_encrypt,
+ .decrypt = aspeed_aes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "aspeed-ofb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ecb_encrypt,
+ .decrypt = aspeed_des_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "aspeed-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cbc_encrypt,
+ .decrypt = aspeed_des_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "aspeed-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cfb_encrypt,
+ .decrypt = aspeed_des_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des)",
+ .cra_driver_name = "aspeed-cfb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ofb_encrypt,
+ .decrypt = aspeed_des_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des)",
+ .cra_driver_name = "aspeed-ofb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ecb_encrypt,
+ .decrypt = aspeed_tdes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "aspeed-ecb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cbc_encrypt,
+ .decrypt = aspeed_tdes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "aspeed-cbc-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cfb_encrypt,
+ .decrypt = aspeed_tdes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des3_ede)",
+ .cra_driver_name = "aspeed-cfb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ofb_encrypt,
+ .decrypt = aspeed_tdes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des3_ede)",
+ .cra_driver_name = "aspeed-ofb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ctr_encrypt,
+ .decrypt = aspeed_aes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aspeed-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ctr_encrypt,
+ .decrypt = aspeed_des_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des)",
+ .cra_driver_name = "aspeed-ctr-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ctr_encrypt,
+ .decrypt = aspeed_tdes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des3_ede)",
+ .cra_driver_name = "aspeed-ctr-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+}
+
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
+ aspeed_crypto_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
+ aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+ }
+ }
+}
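+
+/*
+ * Usage sketch (illustrative only, not driver code): once registered,
+ * the skciphers above are reachable through the generic kernel API,
+ * e.g. for "cbc(aes)". Variables key/src_sg/dst_sg/nbytes/iv are
+ * assumed to be set up by the caller; error checks are omitted.
+ *
+ *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+ *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	int err;
+ *
+ *	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
+ *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				      crypto_req_done, &wait);
+ *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
+ *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ *
+ * The fallback tfm allocated in aspeed_crypto_cra_init() serves requests
+ * the engine cannot handle directly, which is why these algorithms set
+ * CRYPTO_ALG_NEED_FALLBACK.
+ */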
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
new file mode 100644
index 000000000000..935135229ebd
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define AHASH_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define AHASH_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* Initialization Vectors for SHA-family */
+static const __be32 sha1_iv[8] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0
+};
+
+static const __be32 sha224_iv[8] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 sha256_iv[8] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+static const __be64 sha384_iv[8] = {
+ cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+ cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+ cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+ cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
+};
+
+static const __be64 sha512_iv[8] = {
+ cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+ cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+ cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+ cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
+};
+
+static const __be32 sha512_224_iv[16] = {
+ cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
+ cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
+ cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
+ cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
+ cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
+ cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
+ cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
+ cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
+};
+
+static const __be32 sha512_256_iv[16] = {
+ cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
+ cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
+ cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
+ cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
+ cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
+ cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
+ cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
+ cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
+};
+
+/*
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message, followed by
+ * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
+ * 128-bit block (SHA384/SHA512) containing the message length in bits
+ * is appended.
+ *
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
+ *  - if (length mod 64) < 56 then padlen = 56 - (length mod 64)
+ *  - else padlen = 64 + 56 - (length mod 64)
+ *
+ * For SHA384/SHA512, padlen is calculated as follows:
+ *  - if (length mod 128) < 112 then padlen = 112 - (length mod 128)
+ *  - else padlen = 128 + 112 - (length mod 128)
+ */
+static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx)
+{
+ unsigned int index, padlen;
+ __be64 bits[2];
+
+ AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
+
+ switch (rctx->flags & SHA_FLAGS_MASK) {
+ case SHA_FLAGS_SHA1:
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
+ index = rctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
+ rctx->bufcnt += padlen + 8;
+ break;
+ default:
+ bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
+ bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
+ rctx->digcnt[0] >> 61);
+ index = rctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
+ rctx->bufcnt += padlen + 16;
+ break;
+ }
+}
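+
+/*
+ * Worked example (illustrative): for a SHA-256 request with 60 bytes
+ * buffered, index = 60 & 0x3f = 60, so padlen = (64 + 56) - 60 = 60.
+ * The engine then sees one 0x80 byte, 59 zero bytes and the 8-byte
+ * big-endian bit count, growing bufcnt by padlen + 8 = 68 to a
+ * block-aligned 128 bytes.
+ */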
+
+/*
+ * Prepare DMA buffer before hardware engine
+ * processing.
+ */
+static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int length, remain;
+
+ length = rctx->total + rctx->bufcnt;
+ remain = length % rctx->block_size;
+
+ AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
+
+ if (rctx->bufcnt)
+ memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
+
+ if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
+ rctx->bufcnt, rctx->src_sg,
+ rctx->offset, rctx->total - remain, 0);
+ rctx->offset += rctx->total - remain;
+
+ } else {
+ dev_warn(hace_dev->dev, "Hash data length is too large\n");
+ return -EINVAL;
+ }
+
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
+ rctx->offset, remain, 0);
+
+ rctx->bufcnt = remain;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ return -ENOMEM;
+ }
+
+ hash_engine->src_length = length - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+}
+
+/*
+ * Prepare the DMA buffer as a scatter-gather list
+ * before hardware engine processing.
+ */
+static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct aspeed_sg_list *src_list;
+ struct scatterlist *s;
+ int length, remain, sg_len, i;
+ int rc = 0;
+
+ remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
+ length = rctx->total + rctx->bufcnt - remain;
+
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ "length", length, "remain", remain);
+
+ sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+ if (!sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto free_src_sg;
+ }
+
+ if (rctx->bufcnt != 0) {
+ u32 phy_addr;
+ u32 len;
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ phy_addr = rctx->buffer_dma_addr;
+ len = rctx->bufcnt;
+ length -= len;
+
+ /* Last sg list */
+ if (length == 0)
+ len |= HASH_SG_LAST_LIST;
+
+ src_list[0].phy_addr = cpu_to_le32(phy_addr);
+ src_list[0].len = cpu_to_le32(len);
+ src_list++;
+ }
+
+ if (length != 0) {
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (length > len) {
+ length -= len;
+ } else {
+ /* Last sg list */
+ len = length;
+ len |= HASH_SG_LAST_LIST;
+ length = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+ }
+
+ if (length != 0) {
+ rc = -EINVAL;
+ goto free_rctx_buffer;
+ }
+
+ rctx->offset = rctx->total - remain;
+ hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+
+free_rctx_buffer:
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+free_src_sg:
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+end:
+ return rc;
+}
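+
+/*
+ * Simplified model (illustrative, not driver code) of the descriptor
+ * chain built above: every entry carries a DMA address and a length,
+ * and the final entry has HASH_SG_LAST_LIST OR-ed into its length so
+ * the engine knows where the chain ends. addr_of_chunk()/len_of_chunk()
+ * are hypothetical stand-ins for the sg_dma_*() accessors.
+ *
+ *	struct sg_entry { __le32 phy_addr; __le32 len; };
+ *
+ *	static void fill_chain(struct sg_entry *e, int n)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < n; i++) {
+ *			e[i].phy_addr = cpu_to_le32(addr_of_chunk(i));
+ *			e[i].len = cpu_to_le32(len_of_chunk(i) |
+ *					(i == n - 1 ? HASH_SG_LAST_LIST : 0));
+ *		}
+ *	}
+ */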
+
+static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
+
+ return 0;
+}
+
+/*
+ * Copy digest to the corresponding request result.
+ * This function is called at the final() stage.
+ */
+static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+/*
+ * Trigger hardware engines to do the math.
+ */
+static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
+ aspeed_hace_fn_t resume)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
+ &hash_engine->src_dma, &hash_engine->digest_dma,
+ hash_engine->src_length);
+
+ rctx->cmd |= HASH_CMD_INT_ENABLE;
+ hash_engine->resume = resume;
+
+ ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_DIGEST_BUFF);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_KEY_BUFF);
+ ast_hace_write(hace_dev, hash_engine->src_length,
+ ASPEED_HACE_HASH_DATA_LEN);
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
+
+ return -EINPROGRESS;
+}
+
+/*
+ * The HMAC resume handler performs the second hash pass,
+ * which produces the final HMAC value from the inner hash
+ * result and the outer key pad.
+ */
+static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ /* o key pad + hash sum 1 */
+ memcpy(rctx->buffer, bctx->opad, rctx->block_size);
+ memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
+
+ rctx->bufcnt = rctx->block_size + rctx->digsize;
+ rctx->digcnt[0] = rctx->block_size + rctx->digsize;
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+ memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
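+
+/*
+ * For reference (illustrative): the two passes implement the standard
+ * HMAC construction
+ *
+ *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
+ *
+ * where K' is the key padded (or pre-hashed) to the block size in
+ * aspeed_sham_setkey(). The first pass hashes ipad || m in accumulative
+ * mode; the resume handler above then hashes opad || inner digest to
+ * produce the final value.
+ */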
+
+static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ if (rctx->flags & SHA_FLAGS_HMAC)
+ return aspeed_hace_ahash_trigger(hace_dev,
+ aspeed_ahash_hmac_resume);
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
+ rctx->total - rctx->offset, 0);
+
+ rctx->bufcnt = rctx->total - rctx->offset;
+ rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ aspeed_hace_fn_t resume;
+ int ret;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ if (hace_dev->version == AST2600_VERSION) {
+ rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
+ resume = aspeed_ahash_update_resume_sg;
+
+ } else {
+ resume = aspeed_ahash_update_resume;
+ }
+
+ ret = hash_engine->dma_prepare(hace_dev);
+ if (ret)
+ return ret;
+
+ return aspeed_hace_ahash_trigger(hace_dev, resume);
+}
+
+static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct ahash_request *req)
+{
+ return crypto_transfer_hash_request_to_engine(
+ hace_dev->crypt_engine_hash, req);
+}
+
+static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+ int ret = 0;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ if (rctx->op == SHA_OP_UPDATE)
+ ret = aspeed_ahash_req_update(hace_dev);
+ else if (rctx->op == SHA_OP_FINAL)
+ ret = aspeed_ahash_req_final(hace_dev);
+
+ if (ret != -EINPROGRESS)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->req = req;
+
+ if (hace_dev->version == AST2600_VERSION)
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
+ else
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+
+ return 0;
+}
+
+static int aspeed_sham_update(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->total = req->nbytes;
+ rctx->src_sg = req->src;
+ rctx->offset = 0;
+ rctx->src_nents = sg_nents(req->src);
+ rctx->op = SHA_OP_UPDATE;
+
+ rctx->digcnt[0] += rctx->total;
+ if (rctx->digcnt[0] < rctx->total)
+ rctx->digcnt[1]++;
+
+ if (rctx->bufcnt + rctx->total < rctx->block_size) {
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
+ rctx->src_sg, rctx->offset,
+ rctx->total, 0);
+ rctx->bufcnt += rctx->total;
+
+ return 0;
+ }
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ return crypto_shash_digest(shash, data, len, out);
+}
+
+static int aspeed_sham_final(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
+ req->nbytes, rctx->total);
+ rctx->op = SHA_OP_FINAL;
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_finup(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ int rc1, rc2;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->flags |= SHA_FLAGS_FINUP;
+
+ rc1 = aspeed_sham_update(req);
+ if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
+ return rc1;
+
+ /*
+ * final() must always be called to clean up resources,
+ * even if update() failed, unless it returned -EINPROGRESS
+ * or -EBUSY.
+ */
+ rc2 = aspeed_sham_final(req);
+
+ return rc1 ? : rc2;
+}
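+
+/*
+ * Usage sketch (illustrative only, not driver code): a caller reaches
+ * these handlers through the standard ahash API; the crypto core routes
+ * e.g. "sha256" to this driver when its priority wins. data/len/digest
+ * are assumed to be provided by the caller.
+ *
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	struct scatterlist sg;
+ *	int err;
+ *
+ *	sg_init_one(&sg, data, len);
+ *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				   crypto_req_done, &wait);
+ *	ahash_request_set_crypt(req, &sg, digest, len);
+ *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ *
+ * Cleanup via ahash_request_free() and crypto_free_ahash() is omitted.
+ */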
+
+static int aspeed_sham_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "%s: digest size:%d\n",
+ crypto_tfm_alg_name(&tfm->base),
+ crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA1;
+ rctx->digsize = SHA1_DIGEST_SIZE;
+ rctx->block_size = SHA1_BLOCK_SIZE;
+ rctx->sha_iv = sha1_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha1_iv, rctx->ivsize);
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA224_BLOCK_SIZE;
+ rctx->sha_iv = sha224_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA256_BLOCK_SIZE;
+ rctx->sha_iv = sha256_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha256_iv, rctx->ivsize);
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA384;
+ rctx->digsize = SHA384_DIGEST_SIZE;
+ rctx->block_size = SHA384_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha384_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha384_iv, rctx->ivsize);
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512;
+ rctx->digsize = SHA512_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha512_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_iv, rctx->ivsize);
+ break;
+ default:
+ dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sha512s_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_224_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_256_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
+ break;
+ default:
+ dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sham_digest(struct ahash_request *req)
+{
+ return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
+}
+
+static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int err = 0;
+ int i;
+
+ AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
+ keylen);
+
+ if (keylen > bs) {
+ err = aspeed_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= HMAC_IPAD_VALUE;
+ bctx->opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ return err;
+}
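+
+/*
+ * Worked example (illustrative): for hmac(sha256), bs = 64 and ds = 32.
+ * A 100-byte key is first reduced to its 32-byte digest, zero-padded to
+ * 64 bytes, and the pads are then derived bytewise as
+ *
+ *	ipad[i] = K'[i] ^ 0x36;		opad[i] = K'[i] ^ 0x5c;
+ *
+ * matching HMAC_IPAD_VALUE and HMAC_OPAD_VALUE from <crypto/hmac.h>.
+ */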
+
+static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
+{
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_alg *ast_alg;
+
+ ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+ tctx->hace_dev = ast_alg->hace_dev;
+ tctx->flags = 0;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct aspeed_sham_reqctx));
+
+ if (ast_alg->alg_base) {
+ /* hmac related */
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ tctx->flags |= SHA_FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ dev_warn(ast_alg->hace_dev->dev,
+ "base driver '%s' could not be loaded.\n",
+ ast_alg->alg_base);
+ return PTR_ERR(bctx->shash);
+ }
+ }
+
+ tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
+ tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
+ tctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
+
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct aspeed_hace_alg aspeed_ahash_algs[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "aspeed-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "aspeed-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "aspeed-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha1",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "aspeed-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha224",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "aspeed-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha256",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "aspeed-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
+static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "aspeed-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "aspeed-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_224",
+ .cra_driver_name = "aspeed-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_256",
+ .cra_driver_name = "aspeed-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha384",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "aspeed-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "aspeed-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_224",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_224)",
+ .cra_driver_name = "aspeed-hmac-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_256",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_256)",
+ .cra_driver_name = "aspeed-hmac-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+}
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
+ aspeed_ahash_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
+ aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
new file mode 100644
index 000000000000..656cb92c8bb6
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define HACE_DBG(d, fmt, ...) \
+ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define HACE_DBG(d, fmt, ...) \
+ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* HACE interrupt service routine */
+static irqreturn_t aspeed_hace_irq(int irq, void *dev)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ u32 sts;
+
+ sts = ast_hace_read(hace_dev, ASPEED_HACE_STS);
+ ast_hace_write(hace_dev, sts, ASPEED_HACE_STS);
+
+ HACE_DBG(hace_dev, "irq status: 0x%x\n", sts);
+
+ if (sts & HACE_HASH_ISR) {
+ if (hash_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&hash_engine->done_task);
+ else
+ dev_warn(hace_dev->dev, "HASH no active requests.\n");
+ }
+
+ if (sts & HACE_CRYPTO_ISR) {
+ if (crypto_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&crypto_engine->done_task);
+ else
+ dev_warn(hace_dev->dev, "CRYPTO no active requests.\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void aspeed_hace_crypto_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+
+ crypto_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_hash_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ hash_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_register_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_register_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_unregister_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_unregister_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static const struct of_device_id aspeed_hace_of_matches[] = {
+ { .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
+ { .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
+ {},
+};
+
+static int aspeed_hace_probe(struct platform_device *pdev)
+{
+ struct aspeed_engine_crypto *crypto_engine;
+ const struct of_device_id *hace_dev_id;
+ struct aspeed_engine_hash *hash_engine;
+ struct aspeed_hace_dev *hace_dev;
+ struct resource *res;
+ int rc;
+
+ hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
+ GFP_KERNEL);
+ if (!hace_dev)
+ return -ENOMEM;
+
+ hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
+ if (!hace_dev_id) {
+ dev_err(&pdev->dev, "Failed to match hace dev id\n");
+ return -EINVAL;
+ }
+
+ hace_dev->dev = &pdev->dev;
+ hace_dev->version = (unsigned long)hace_dev_id->data;
+ hash_engine = &hace_dev->hash_engine;
+ crypto_engine = &hace_dev->crypto_engine;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ platform_set_drvdata(pdev, hace_dev);
+
+ hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hace_dev->regs))
+ return PTR_ERR(hace_dev->regs);
+
+ /* Get irq number and register it */
+ hace_dev->irq = platform_get_irq(pdev, 0);
+ if (hace_dev->irq < 0)
+ return -ENXIO;
+
+ rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0,
+ dev_name(&pdev->dev), hace_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request interrupt\n");
+ return rc;
+ }
+
+ /* Get clk and enable it */
+ hace_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hace_dev->clk)) {
+ dev_err(&pdev->dev, "Failed to get clk\n");
+ return -ENODEV;
+ }
+
+ rc = clk_prepare_enable(hace_dev->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc);
+ return rc;
+ }
+
+ /* Initialize crypto hardware engine structure for hash */
+ hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_hash) {
+ rc = -ENOMEM;
+ goto clk_exit;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_hash);
+ if (rc)
+ goto err_engine_hash_start;
+
+ tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
+ (unsigned long)hace_dev);
+
+ /* Initialize crypto hardware engine structure for crypto */
+ hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_crypto) {
+ rc = -ENOMEM;
+ goto err_engine_hash_start;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
+ if (rc)
+ goto err_engine_crypto_start;
+
+ tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
+ (unsigned long)hace_dev);
+
+ /* Allocate DMA buffer for hash engine input used */
+ hash_engine->ahash_src_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_HASH_SRC_DMA_BUF_LEN,
+ &hash_engine->ahash_src_dma_addr,
+ GFP_KERNEL);
+ if (!hash_engine->ahash_src_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+ /* Allocate DMA buffer for crypto engine context used */
+ crypto_engine->cipher_ctx =
+ dmam_alloc_coherent(&pdev->dev,
+ PAGE_SIZE,
+ &crypto_engine->cipher_ctx_dma,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_ctx) {
+ dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+ /* Allocate DMA buffer for crypto engine input used */
+ crypto_engine->cipher_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
+ &crypto_engine->cipher_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_addr) {
+ dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+ /* Allocate DMA buffer for crypto engine output used */
+ if (hace_dev->version == AST2600_VERSION) {
+ crypto_engine->dst_sg_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_DST_DMA_BUF_LEN,
+ &crypto_engine->dst_sg_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->dst_sg_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+ }
+
+ aspeed_hace_register(hace_dev);
+
+ dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");
+
+ return 0;
+
+err_engine_crypto_start:
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+err_engine_hash_start:
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+clk_exit:
+ clk_disable_unprepare(hace_dev->clk);
+
+ return rc;
+}
+
+static int aspeed_hace_remove(struct platform_device *pdev)
+{
+ struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ aspeed_hace_unregister(hace_dev);
+
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+
+ tasklet_kill(&hash_engine->done_task);
+ tasklet_kill(&crypto_engine->done_task);
+
+ clk_disable_unprepare(hace_dev->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
+
+static struct platform_driver aspeed_hace_driver = {
+ .probe = aspeed_hace_probe,
+ .remove = aspeed_hace_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_hace_of_matches,
+ },
+};
+
+module_platform_driver(aspeed_hace_driver);
+
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_DESCRIPTION("Aspeed HACE Crypto Accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
new file mode 100644
index 000000000000..f2cde23b56ae
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __ASPEED_HACE_H__
+#define __ASPEED_HACE_H__
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fips.h>
+#include <linux/dma-mapping.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+/*****************************
+ *                           *
+ * HACE register definitions *
+ *                           *
+ *****************************/
+#define ASPEED_HACE_SRC 0x00 /* Crypto Data Source Base Address Register */
+#define ASPEED_HACE_DEST 0x04 /* Crypto Data Destination Base Address Register */
+#define ASPEED_HACE_CONTEXT 0x08 /* Crypto Context Buffer Base Address Register */
+#define ASPEED_HACE_DATA_LEN 0x0C /* Crypto Data Length Register */
+#define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */
+
+/* G5 */
+#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */
+/* G6 */
+#define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */
+#define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */
+
+#define ASPEED_HACE_STS 0x1C /* HACE Status Register */
+
+#define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */
+#define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */
+#define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */
+#define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */
+#define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */
+
+/* crypto cmd */
+#define HACE_CMD_SINGLE_DES 0
+#define HACE_CMD_TRIPLE_DES BIT(17)
+#define HACE_CMD_AES_SELECT 0
+#define HACE_CMD_DES_SELECT BIT(16)
+#define HACE_CMD_ISR_EN BIT(12)
+#define HACE_CMD_CONTEXT_SAVE_ENABLE (0)
+#define HACE_CMD_CONTEXT_SAVE_DISABLE BIT(9)
+#define HACE_CMD_AES (0)
+#define HACE_CMD_DES (0)
+#define HACE_CMD_RC4 BIT(8)
+#define HACE_CMD_DECRYPT (0)
+#define HACE_CMD_ENCRYPT BIT(7)
+
+#define HACE_CMD_ECB (0x0 << 4)
+#define HACE_CMD_CBC (0x1 << 4)
+#define HACE_CMD_CFB (0x2 << 4)
+#define HACE_CMD_OFB (0x3 << 4)
+#define HACE_CMD_CTR (0x4 << 4)
+#define HACE_CMD_OP_MODE_MASK (0x7 << 4)
+
+#define HACE_CMD_AES128 (0x0 << 2)
+#define HACE_CMD_AES192 (0x1 << 2)
+#define HACE_CMD_AES256 (0x2 << 2)
+#define HACE_CMD_OP_CASCADE (0x3)
+#define HACE_CMD_OP_INDEPENDENT (0x1)
+
+/* G5 */
+#define HACE_CMD_RI_WO_DATA_ENABLE (0)
+#define HACE_CMD_RI_WO_DATA_DISABLE BIT(11)
+#define HACE_CMD_CONTEXT_LOAD_ENABLE (0)
+#define HACE_CMD_CONTEXT_LOAD_DISABLE BIT(10)
+/* G6 */
+#define HACE_CMD_AES_KEY_FROM_OTP BIT(24)
+#define HACE_CMD_GHASH_TAG_XOR_EN BIT(23)
+#define HACE_CMD_GHASH_PAD_LEN_INV BIT(22)
+#define HACE_CMD_GCM_TAG_ADDR_SEL BIT(21)
+#define HACE_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HACE_CMD_DES_SG_CTRL BIT(19)
+#define HACE_CMD_SRC_SG_CTRL BIT(18)
+#define HACE_CMD_CTR_IV_AES_96 (0x1 << 14)
+#define HACE_CMD_CTR_IV_DES_32 (0x1 << 14)
+#define HACE_CMD_CTR_IV_AES_64 (0x2 << 14)
+#define HACE_CMD_CTR_IV_AES_32 (0x3 << 14)
+#define HACE_CMD_AES_KEY_HW_EXP BIT(13)
+#define HACE_CMD_GCM (0x5 << 4)
+
+/* interrupt status reg */
+#define HACE_CRYPTO_ISR BIT(12)
+#define HACE_HASH_ISR BIT(9)
+#define HACE_HASH_BUSY BIT(0)
+
+/* hash cmd reg */
+#define HASH_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HASH_CMD_HASH_SRC_SG_CTRL BIT(18)
+#define HASH_CMD_SHA512_224 (0x3 << 10)
+#define HASH_CMD_SHA512_256 (0x2 << 10)
+#define HASH_CMD_SHA384 (0x1 << 10)
+#define HASH_CMD_SHA512 (0)
+#define HASH_CMD_INT_ENABLE BIT(9)
+#define HASH_CMD_HMAC (0x1 << 7)
+#define HASH_CMD_ACC_MODE (0x2 << 7)
+#define HASH_CMD_HMAC_KEY (0x3 << 7)
+#define HASH_CMD_SHA1 (0x2 << 4)
+#define HASH_CMD_SHA224 (0x4 << 4)
+#define HASH_CMD_SHA256 (0x5 << 4)
+#define HASH_CMD_SHA512_SER (0x6 << 4)
+#define HASH_CMD_SHA_SWAP (0x2 << 2)
+
+#define HASH_SG_LAST_LIST BIT(31)
+
+#define CRYPTO_FLAGS_BUSY BIT(1)
+
+#define SHA_OP_UPDATE 1
+#define SHA_OP_FINAL 2
+
+#define SHA_FLAGS_SHA1 BIT(0)
+#define SHA_FLAGS_SHA224 BIT(1)
+#define SHA_FLAGS_SHA256 BIT(2)
+#define SHA_FLAGS_SHA384 BIT(3)
+#define SHA_FLAGS_SHA512 BIT(4)
+#define SHA_FLAGS_SHA512_224 BIT(5)
+#define SHA_FLAGS_SHA512_256 BIT(6)
+#define SHA_FLAGS_HMAC BIT(8)
+#define SHA_FLAGS_FINUP BIT(9)
+#define SHA_FLAGS_MASK (0xff)
+
+#define ASPEED_CRYPTO_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_DST_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_GCM_TAG_OFFSET 0x9ff0
+#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_HASH_QUEUE_LENGTH 50
+
+#define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \
+ HACE_CMD_OFB | HACE_CMD_CTR)
+
+struct aspeed_hace_dev;
+
+typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
+
+struct aspeed_sg_list {
+ __le32 len;
+ __le32 phy_addr;
+};
+
+struct aspeed_engine_hash {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct ahash_request *req;
+
+ /* input buffer */
+ void *ahash_src_addr;
+ dma_addr_t ahash_src_dma_addr;
+
+ dma_addr_t src_dma;
+ dma_addr_t digest_dma;
+
+ size_t src_length;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+ aspeed_hace_fn_t dma_prepare;
+};
+
+struct aspeed_sha_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA512_BLOCK_SIZE];
+ u8 opad[SHA512_BLOCK_SIZE];
+};
+
+struct aspeed_sham_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ unsigned long flags; /* hmac flag */
+
+ struct aspeed_sha_hmac_ctx base[0];
+};
+
+struct aspeed_sham_reqctx {
+ unsigned long flags; /* final/update flags */
+ unsigned long op; /* final or update */
+ u32 cmd; /* trigger cmd */
+
+ /* walk state */
+ struct scatterlist *src_sg;
+ int src_nents;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* per update length */
+
+ size_t digsize;
+ size_t block_size;
+ size_t ivsize;
+ const __be32 *sha_iv;
+
+ /* remain data buffer */
+ u8 buffer[SHA512_BLOCK_SIZE * 2];
+ dma_addr_t buffer_dma_addr;
+ size_t bufcnt; /* buffer counter */
+
+ /* output buffer */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+ dma_addr_t digest_dma_addr;
+ u64 digcnt[2];
+};
+
+struct aspeed_engine_crypto {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct skcipher_request *req;
+
+ /* context buffer */
+ void *cipher_ctx;
+ dma_addr_t cipher_ctx_dma;
+
+ /* input buffer, could be single/scatter-gather lists */
+ void *cipher_addr;
+ dma_addr_t cipher_dma_addr;
+
+ /* output buffer, only used in scatter-gather lists */
+ void *dst_sg_addr;
+ dma_addr_t dst_sg_dma_addr;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+};
+
+struct aspeed_cipher_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ int key_len;
+ u8 key[AES_MAX_KEYLENGTH];
+
+ /* callback func */
+ aspeed_hace_fn_t start;
+
+ struct crypto_skcipher *fallback_tfm;
+};
+
+struct aspeed_cipher_reqctx {
+ int enc_cmd;
+ int src_nents;
+ int dst_nents;
+
+ struct skcipher_request fallback_req; /* keep at the end */
+};
+
+struct aspeed_hace_dev {
+ void __iomem *regs;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ unsigned long version;
+
+ struct crypto_engine *crypt_engine_hash;
+ struct crypto_engine *crypt_engine_crypto;
+
+ struct aspeed_engine_hash hash_engine;
+ struct aspeed_engine_crypto crypto_engine;
+};
+
+struct aspeed_hace_alg {
+ struct aspeed_hace_dev *hace_dev;
+
+ const char *alg_base;
+
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ } alg;
+};
+
+enum aspeed_version {
+ AST2500_VERSION = 5,
+ AST2600_VERSION
+};
+
+#define ast_hace_write(hace, val, offset) \
+ writel((val), (hace)->regs + (offset))
+#define ast_hace_read(hace, offset) \
+ readl((hace)->regs + (offset))
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+
+#endif
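
For orientation, the HACE_CMD_* bits above combine into a single word written to the ASPEED_HACE_CMD register (offset 0x10). The standalone sketch below is illustrative only and not part of the patch; it copies a few of the defines to show how a command for AES-128 CBC encryption with interrupts enabled would be composed.

/* Illustrative only: composing a HACE command word from the defines above. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)              (1U << (n))
#define HACE_CMD_AES_SELECT 0
#define HACE_CMD_ISR_EN     BIT(12)
#define HACE_CMD_ENCRYPT    BIT(7)
#define HACE_CMD_CBC        (0x1 << 4)
#define HACE_CMD_AES128     (0x0 << 2)

int main(void)
{
	uint32_t cmd = HACE_CMD_AES_SELECT | HACE_CMD_AES128 |
		       HACE_CMD_CBC | HACE_CMD_ENCRYPT | HACE_CMD_ISR_EN;

	printf("HACE cmd word: 0x%08x\n", cmd);	/* prints 0x00001090 */
	return 0;
}
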
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 9ad188cffd0d..51c66afbe677 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1712,7 +1712,7 @@ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
cipher_len = regk_crypto_key_256;
break;
default:
- pr_err("%s: Invalid key length %d!\n",
+ pr_err("%s: Invalid key length %zu!\n",
MODULE_NAME, ctx->key_length);
return -EINVAL;
}
@@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data)
return;
}
- spin_lock_bh(&ac->queue_lock);
+ spin_lock(&ac->queue_lock);
list_for_each_entry_safe(req, n, &ac->pending, list) {
struct artpec6_crypto_dma_descriptors *dma = req->dma;
@@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data)
artpec6_crypto_process_queue(ac, &complete_in_progress);
- spin_unlock_bh(&ac->queue_lock);
+ spin_unlock(&ac->queue_lock);
/* Perform the completion callbacks without holding the queue lock
* to allow new request submissions from the callbacks.
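
The spin_lock_bh() to spin_lock() change above works because artpec6_crypto_task() runs as a tasklet, i.e. already in softirq context with bottom halves disabled, so re-disabling them is redundant. A minimal kernel-module sketch of the pattern, with hypothetical names not taken from this driver:

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_tasklet_fn(struct tasklet_struct *t)
{
	/* Tasklets run in softirq context: BHs are already disabled,
	 * so plain spin_lock() is sufficient here. */
	spin_lock(&demo_lock);
	/* ... drain work shared with process context ... */
	spin_unlock(&demo_lock);
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn);

static int __init demo_init(void)
{
	/* Process context must still use the _bh variant so the tasklet
	 * cannot interrupt the critical section on this CPU. */
	spin_lock_bh(&demo_lock);
	/* ... queue work ... */
	spin_unlock_bh(&demo_lock);
	tasklet_schedule(&demo_tasklet);
	return 0;
}

static void __exit demo_exit(void)
{
	tasklet_kill(&demo_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
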
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 053315e260c2..c8c799428fe0 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1928,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req)
/* SPU2 hardware does not compute hash of zero length data */
if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
(iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
flow_log("Doing %sfinal %s zero-len hash request in software\n",
rctx->is_final ? "" : "non-", alg_name);
err = do_shash((unsigned char *)alg_name, req->result,
@@ -2029,7 +2029,7 @@ static int ahash_init(struct ahash_request *req)
* supported by the hardware, we need to handle it in software
* by calling synchronous hash functions.
*/
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
hash = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(hash)) {
ret = PTR_ERR(hash);
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 71281a3bdbdc..d6d87332140a 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -231,7 +231,7 @@ struct iproc_ctx_s {
/*
* shash descriptor - needed to perform incremental hashing in
- * in software, when hw doesn't support it.
+ * software, when hw doesn't support it.
*/
struct shash_desc *shash;
diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h
index 8ec6edc69f3f..ae4791a8ec4a 100644
--- a/drivers/crypto/cavium/cpt/cpt_hw_types.h
+++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h
@@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s {
* Word0
* reserved_20_63:44 [63:20] Reserved.
* dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
- * to the CPT instruction doorbell count. Readback value is the the
+ * to the CPT instruction doorbell count. Readback value is the
* current number of pending doorbell requests. If counter overflows
* CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
* zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 8c32d0eb8fcf..6872ac344001 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
+ unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
@@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
- mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size) {
+ code_length = ntohl(ucode->code_length);
+ if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
+ mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
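
The cptpf hunk replaces a post-multiplication zero check with a bound check before the multiplication, since ntohl(ucode->code_length) * 2 can overflow a signed int for a malformed firmware image. A standalone userspace sketch of the same guard:

/* Validate an untrusted 32-bit length before doubling it, instead of
 * testing the (possibly wrapped) product afterwards. */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static int ucode_size_ok(uint32_t code_length, size_t *out)
{
	if (code_length == 0 || code_length >= INT_MAX / 2)
		return -1;		/* reject zero and overflowing sizes */
	*out = (size_t)code_length * 2;
	return 0;
}

int main(void)
{
	size_t sz;

	printf("0x10       -> %s\n", ucode_size_ok(0x10, &sz) ? "reject" : "ok");
	printf("0x80000000 -> %s\n", ucode_size_ok(0x80000000u, &sz) ? "reject" : "ok");
	return 0;
}
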
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 7df71fcebe8f..1046a746d36f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -198,22 +198,16 @@ static int zip_decompress(const u8 *src, unsigned int slen,
/* Legacy Compress framework start */
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 0);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 0);
}
int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 1);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 1);
}
void zip_free_comp_ctx(struct crypto_tfm *tfm)
@@ -227,24 +221,18 @@ int zip_comp_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_comp_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* Legacy compress framework end */
/* SCOMP framework start */
@@ -298,22 +286,16 @@ int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_scomp_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* SCOMP framework end */
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ec97daf0fcb7..278636ed251a 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -64,7 +64,6 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
- int ret;
if (!ctx->u.des3.key_len)
return -EINVAL;
@@ -100,9 +99,7 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
- ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
- return ret;
+ return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_des3_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 7d4b4ad1db1f..9f753cb4f5f1 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -641,6 +641,10 @@ static void ccp_dma_release(struct ccp_device *ccp)
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
@@ -766,8 +770,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
- dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
+ dma_async_device_unregister(dma_dev);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 9f588c9728f8..06fc7156c04f 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -211,18 +211,24 @@ static int sev_read_init_ex_file(void)
if (IS_ERR(fp)) {
int ret = PTR_ERR(fp);
- dev_err(sev->dev,
- "SEV: could not open %s for read, error %d\n",
- init_ex_path, ret);
+ if (ret == -ENOENT) {
+ dev_info(sev->dev,
+ "SEV: %s does not exist and will be created later.\n",
+ init_ex_path);
+ ret = 0;
+ } else {
+ dev_err(sev->dev,
+ "SEV: could not open %s for read, error %d\n",
+ init_ex_path, ret);
+ }
return ret;
}
nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
if (nread != NV_LENGTH) {
- dev_err(sev->dev,
- "SEV: failed to read %u bytes to non volatile memory area, ret %ld\n",
+ dev_info(sev->dev,
+ "SEV: could not read %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nread);
- return -EIO;
}
dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
@@ -231,7 +237,7 @@ static int sev_read_init_ex_file(void)
return 0;
}
-static void sev_write_init_ex_file(void)
+static int sev_write_init_ex_file(void)
{
struct sev_device *sev = psp_master->sev_data;
struct file *fp;
@@ -241,14 +247,16 @@ static void sev_write_init_ex_file(void)
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
if (IS_ERR(fp)) {
+ int ret = PTR_ERR(fp);
+
dev_err(sev->dev,
- "SEV: could not open file for write, error %ld\n",
- PTR_ERR(fp));
- return;
+ "SEV: could not open file for write, error %d\n",
+ ret);
+ return ret;
}
nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
@@ -259,18 +267,20 @@ static void sev_write_init_ex_file(void)
dev_err(sev->dev,
"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nwrite);
- return;
+ return -EIO;
}
dev_dbg(sev->dev, "SEV: write successful to NV file\n");
+
+ return 0;
}
-static void sev_write_init_ex_file_if_required(int cmd_id)
+static int sev_write_init_ex_file_if_required(int cmd_id)
{
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
/*
* Only a few platform commands modify the SPI/NV area, but none of the
@@ -285,10 +295,10 @@ static void sev_write_init_ex_file_if_required(int cmd_id)
case SEV_CMD_PEK_GEN:
break;
default:
- return;
+ return 0;
}
- sev_write_init_ex_file();
+ return sev_write_init_ex_file();
}
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
@@ -361,7 +371,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
cmd, reg & PSP_CMDRESP_ERR_MASK);
ret = -EIO;
} else {
- sev_write_init_ex_file_if_required(cmd);
+ ret = sev_write_init_ex_file_if_required(cmd);
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
@@ -410,17 +420,12 @@ static int __sev_init_locked(int *error)
static int __sev_init_ex_locked(int *error)
{
struct sev_data_init_ex data;
- int ret;
memset(&data, 0, sizeof(data));
data.length = sizeof(data);
data.nv_address = __psp_pa(sev_init_ex_buffer);
data.nv_len = NV_LENGTH;
- ret = sev_read_init_ex_file();
- if (ret)
- return ret;
-
if (sev_es_tmr) {
/*
* Do not include the encryption mask on the physical
@@ -439,7 +444,7 @@ static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc, psp_ret = -1;
+ int rc = 0, psp_ret = -1;
int (*init_function)(int *error);
if (!psp || !psp->sev_data)
@@ -450,8 +455,15 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
- init_function = sev_init_ex_buffer ? __sev_init_ex_locked :
- __sev_init_locked;
+ if (sev_init_ex_buffer) {
+ init_function = __sev_init_ex_locked;
+ rc = sev_read_init_ex_file();
+ if (rc)
+ return rc;
+ } else {
+ init_function = __sev_init_locked;
+ }
+
rc = init_function(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
/*
@@ -744,6 +756,11 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
+ if (!sev_version_greater_or_equal(0, 15)) {
+ dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
+ return -1;
+ }
+
if (sev_get_firmware(dev, &firmware) == -ENOENT) {
dev_dbg(dev, "No SEV firmware file present\n");
return -1;
@@ -776,6 +793,14 @@ static int sev_update_firmware(struct device *dev)
data->len = firmware->size;
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
+ /*
+ * A quirk to fix the committed TCB version when upgrading from a
+ * firmware version earlier than 1.50.
+ */
+ if (!ret && !sev_version_greater_or_equal(1, 50))
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
else
@@ -1285,8 +1310,7 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(sev->dev) == 0)
+ if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
/* If an init_ex_path is provided rely on INIT_EX for PSP initialization
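
The sev-dev changes treat a missing init_ex file as benign (it is created on the first write) while still failing hard on other open errors, and they propagate write failures instead of silently ignoring them. A userspace analogue of the open-side pattern, with hypothetical names:

#include <stdio.h>
#include <errno.h>
#include <string.h>

static int read_optional_state(const char *path)
{
	FILE *fp = fopen(path, "rb");

	if (!fp) {
		if (errno == ENOENT) {
			printf("%s does not exist, will be created later\n", path);
			return 0;	/* benign: continue with defaults */
		}
		fprintf(stderr, "could not open %s: %s\n", path, strerror(errno));
		return -errno;		/* real failure */
	}
	/* ... read contents ... */
	fclose(fp);
	return 0;
}

int main(void)
{
	return read_optional_state("/tmp/definitely-missing-state-file") ? 1 : 0;
}
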
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 6140e4927322..9efd88f871d1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -274,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
}
ret = dma_map_sg(dev, sg, *nents, direction);
- if (dma_mapping_error(dev, ret)) {
+ if (!ret) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
return -ENOMEM;
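
The ccree fix matters because dma_map_sg() reports failure by returning 0 (the number of entries mapped), not by returning a handle that dma_mapping_error() understands; the old check could never fire. A kernel-style fragment of the corrected pattern (hypothetical function, not from this driver):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int demo_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, sg, nents, dir);

	if (!mapped)		/* 0 entries mapped means failure */
		return -ENOMEM;

	/* ... program the 'mapped' entries into the hardware ... */

	dma_unmap_sg(dev, sg, nents, dir);
	return 0;
}
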
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9a0558ed82f9..9f0b94c8e03d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -22,7 +22,8 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
- HPRE_CLUSTER3
+ HPRE_CLUSTER3,
+ HPRE_CLUSTERS_NUM_MAX
};
enum hpre_ctrl_dbgfs_file {
@@ -42,9 +43,6 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
-#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
-#define HPRE_CLUSTERS_NUM_V3 1
-#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
@@ -105,5 +103,5 @@ struct hpre_sqe {
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
-
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 3ba6f15deafc..ef02dadd6217 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -51,6 +51,12 @@ struct hpre_ctx;
#define HPRE_ECC_HW256_KSZ_B 32
#define HPRE_ECC_HW384_KSZ_B 48
+/* capability register mask of driver */
+#define HPRE_DRV_RSA_MASK_CAP BIT(0)
+#define HPRE_DRV_DH_MASK_CAP BIT(1)
+#define HPRE_DRV_ECDH_MASK_CAP BIT(2)
+#define HPRE_DRV_X25519_MASK_CAP BIT(5)
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -147,7 +153,7 @@ static int hpre_alloc_req_id(struct hpre_ctx *ctx)
int id;
spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+ id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_irqrestore(&ctx->req_lock, flags);
return id;
@@ -488,7 +494,7 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
if (ret)
hpre_stop_qp_and_put(qp);
@@ -2002,55 +2008,53 @@ static struct kpp_alg dh = {
},
};
-static struct kpp_alg ecdh_nist_p192 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p192_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh-nist-p192",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p256 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p256_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh-nist-p256",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p384 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p384_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p384",
- .cra_driver_name = "hpre-ecdh-nist-p384",
- .cra_module = THIS_MODULE,
- },
+static struct kpp_alg ecdh_curves[] = {
+ {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh-nist-p192",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh-nist-p256",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p384_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "hpre-ecdh-nist-p384",
+ .cra_module = THIS_MODULE,
+ },
+ }
};
static struct kpp_alg curve25519_alg = {
@@ -2070,78 +2074,144 @@ static struct kpp_alg curve25519_alg = {
},
};
-
-static int hpre_register_ecdh(void)
+static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_kpp(&ecdh_nist_p192);
- if (ret)
- return ret;
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return 0;
- ret = crypto_register_kpp(&ecdh_nist_p256);
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
if (ret)
- goto unregister_ecdh_p192;
+ dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
- ret = crypto_register_kpp(&ecdh_nist_p384);
+ return ret;
+}
+
+static void hpre_unregister_rsa(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return;
+
+ crypto_unregister_akcipher(&rsa);
+}
+
+static int hpre_register_dh(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&dh);
if (ret)
- goto unregister_ecdh_p256;
+ dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_dh(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&dh);
+}
+
+static int hpre_register_ecdh(struct hisi_qm *qm)
+{
+ int ret, i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
+ ret = crypto_register_kpp(&ecdh_curves[i]);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
+ ecdh_curves[i].base.cra_name, ret);
+ goto unreg_kpp;
+ }
+ }
return 0;
-unregister_ecdh_p256:
- crypto_unregister_kpp(&ecdh_nist_p256);
-unregister_ecdh_p192:
- crypto_unregister_kpp(&ecdh_nist_p192);
+unreg_kpp:
+ for (--i; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+
return ret;
}
-static void hpre_unregister_ecdh(void)
+static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
- crypto_unregister_kpp(&ecdh_nist_p384);
- crypto_unregister_kpp(&ecdh_nist_p256);
- crypto_unregister_kpp(&ecdh_nist_p192);
+ int i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return;
+
+ for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+}
+
+static int hpre_register_x25519(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_x25519(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&curve25519_alg);
}
int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
+ ret = hpre_register_rsa(qm);
if (ret)
return ret;
- ret = crypto_register_kpp(&dh);
+ ret = hpre_register_dh(qm);
if (ret)
goto unreg_rsa;
- if (qm->ver >= QM_HW_V3) {
- ret = hpre_register_ecdh();
- if (ret)
- goto unreg_dh;
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- goto unreg_ecdh;
- }
- return 0;
+ ret = hpre_register_ecdh(qm);
+ if (ret)
+ goto unreg_dh;
+
+ ret = hpre_register_x25519(qm);
+ if (ret)
+ goto unreg_ecdh;
+
+ return ret;
unreg_ecdh:
- hpre_unregister_ecdh();
+ hpre_unregister_ecdh(qm);
unreg_dh:
- crypto_unregister_kpp(&dh);
+ hpre_unregister_dh(qm);
unreg_rsa:
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_rsa(qm);
return ret;
}
void hpre_algs_unregister(struct hisi_qm *qm)
{
- if (qm->ver >= QM_HW_V3) {
- crypto_unregister_kpp(&curve25519_alg);
- hpre_unregister_ecdh();
- }
-
- crypto_unregister_kpp(&dh);
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_x25519(qm);
+ hpre_unregister_ecdh(qm);
+ hpre_unregister_dh(qm);
+ hpre_unregister_rsa(qm);
}
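
hpre_register_ecdh() above uses the common register-an-array-with-rollback idiom: on failure, unwind only the entries that already succeeded, in reverse order. A standalone userspace sketch of the idiom with placeholder register/unregister functions:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *curves[] = { "p192", "p256", "p384" };

static int register_curve(const char *name)
{
	printf("register %s\n", name);
	return 0;	/* flip to nonzero to exercise the rollback path */
}

static void unregister_curve(const char *name)
{
	printf("unregister %s\n", name);
}

static int register_all(void)
{
	int i, ret;

	for (i = 0; i < (int)ARRAY_SIZE(curves); i++) {
		ret = register_curve(curves[i]);
		if (ret)
			goto unreg;
	}
	return 0;

unreg:
	for (--i; i >= 0; --i)		/* undo only what succeeded */
		unregister_curve(curves[i]);
	return ret;
}

int main(void)
{
	return register_all();
}
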
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 9d529df0eab9..471e5ca720f5 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -53,9 +53,7 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB 0x301414
-#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0
@@ -79,8 +77,6 @@
#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
#define HPRE_BD_USR_MASK GENMASK(1, 0)
-#define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0)
-#define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0)
#define HPRE_PREFETCH_CFG 0x301130
#define HPRE_SVA_PREFTCH_DFX 0x30115C
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
@@ -122,6 +118,8 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
+#define HPRE_DEV_ALG_MAX_LEN 256
+
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -137,6 +135,38 @@ struct hpre_hw_error {
const char *msg;
};
+struct hpre_dev_alg {
+ u32 alg_msk;
+ const char *alg;
+};
+
+static const struct hpre_dev_alg hpre_dev_algs[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = "rsa\n"
+ }, {
+ .alg_msk = BIT(1),
+ .alg = "dh\n"
+ }, {
+ .alg_msk = BIT(2),
+ .alg = "ecdh\n"
+ }, {
+ .alg_msk = BIT(3),
+ .alg = "ecdsa\n"
+ }, {
+ .alg_msk = BIT(4),
+ .alg = "sm2\n"
+ }, {
+ .alg_msk = BIT(5),
+ .alg = "x25519\n"
+ }, {
+ .alg_msk = BIT(6),
+ .alg = "x448\n"
+ }, {
+ /* sentinel */
+ }
+};
+
static struct hisi_qm_list hpre_devices = {
.register_to_crypto = hpre_algs_register,
.unregister_from_crypto = hpre_algs_unregister,
@@ -147,6 +177,62 @@ static const char * const hpre_debug_file_name[] = {
[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
+enum hpre_cap_type {
+ HPRE_QM_NFE_MASK_CAP,
+ HPRE_QM_RESET_MASK_CAP,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_QM_CE_MASK_CAP,
+ HPRE_NFE_MASK_CAP,
+ HPRE_RESET_MASK_CAP,
+ HPRE_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_CE_MASK_CAP,
+ HPRE_CLUSTER_NUM_CAP,
+ HPRE_CORE_TYPE_NUM_CAP,
+ HPRE_CORE_NUM_CAP,
+ HPRE_CLUSTER_CORE_NUM_CAP,
+ HPRE_CORE_ENABLE_BITMAP_CAP,
+ HPRE_DRV_ALG_BITMAP_CAP,
+ HPRE_DEV_ALG_BITMAP_CAP,
+ HPRE_CORE1_ALG_BITMAP_CAP,
+ HPRE_CORE2_ALG_BITMAP_CAP,
+ HPRE_CORE3_ALG_BITMAP_CAP,
+ HPRE_CORE4_ALG_BITMAP_CAP,
+ HPRE_CORE5_ALG_BITMAP_CAP,
+ HPRE_CORE6_ALG_BITMAP_CAP,
+ HPRE_CORE7_ALG_BITMAP_CAP,
+ HPRE_CORE8_ALG_BITMAP_CAP,
+ HPRE_CORE9_ALG_BITMAP_CAP,
+ HPRE_CORE10_ALG_BITMAP_CAP
+};
+
+static const struct hisi_qm_cap_info hpre_basic_info[] = {
+ {HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
+ {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
+ {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
+ {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
+ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
+ {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
+ {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
+ {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
+ {HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
+ {HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
+ {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
+};
+
static const struct hpre_hw_error hpre_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -262,6 +348,46 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
+ if (alg & cap_val)
+ return true;
+
+ return false;
+}
+
+static int hpre_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_msk;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
+ if (alg_msk & hpre_dev_algs[i].alg_msk)
+ strcat(algs, hpre_dev_algs[i].alg);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
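
hpre_set_qm_algs() builds the uacce algs string by concatenating the newline-terminated names whose capability bit is set, then chops the trailing newline with strrchr(). A standalone sketch of the same string assembly:

#include <stdio.h>
#include <string.h>

struct dev_alg {
	unsigned int alg_msk;
	const char *alg;
};

static const struct dev_alg algs_tbl[] = {
	{ 1u << 0, "rsa\n"  },
	{ 1u << 1, "dh\n"   },
	{ 1u << 2, "ecdh\n" },
};

int main(void)
{
	char algs[64] = "";
	unsigned int alg_msk = 0x5;	/* rsa + ecdh */
	char *ptr;
	size_t i;

	for (i = 0; i < sizeof(algs_tbl) / sizeof(algs_tbl[0]); i++)
		if (alg_msk & algs_tbl[i].alg_msk)
			strcat(algs, algs_tbl[i].alg);

	ptr = strrchr(algs, '\n');
	if (ptr)
		*ptr = '\0';	/* drop the trailing separator */

	printf("\"%s\"\n", algs);	/* prints rsa and ecdh, newline-separated */
	return 0;
}
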
+
static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -330,14 +456,12 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static inline int hpre_cluster_num(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
- HPRE_CLUSTERS_NUM_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
}
static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ?
- HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
}
struct hisi_qp *hpre_create_qp(u8 type)
@@ -457,7 +581,7 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -478,7 +602,7 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
@@ -630,7 +754,8 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = HPRE_HAC_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -644,21 +769,30 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- /* disable hpre hw error interrupts */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ /* disable hpre hw error interrupts */
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+
/* clear HPRE hw error source if having */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
- writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
@@ -708,7 +842,7 @@ static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}
-static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
int cluster_index = file->index - HPRE_CLUSTER_CTRL;
@@ -716,8 +850,6 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
HPRE_CLSTR_ADDR_INTRVL;
writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
-
- return 0;
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -792,9 +924,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
goto err_input;
break;
case HPRE_CLUSTER_CTRL:
- ret = hpre_cluster_inqry_write(file, val);
- if (ret)
- goto err_input;
+ hpre_cluster_inqry_write(file, val);
break;
default:
ret = -EINVAL;
@@ -1006,15 +1136,13 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
- else
- qm->algs = "rsa\ndh";
qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
@@ -1030,7 +1158,19 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qm_list = &hpre_devices;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init hpre qm configures!\n");
+ return ret;
+ }
+
+ ret = hpre_set_qm_algs(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to set hpre algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static int hpre_show_last_regs_init(struct hisi_qm *qm)
@@ -1129,7 +1269,11 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,14 +1291,20 @@ static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+ err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
+ err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hpre_err_ini = {
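
Each hpre_basic_info entry describes a capability as an (offset, shift, mask) field plus per-version defaults; hisi_qm_get_hw_info() either returns the default or extracts the field from the register. A standalone sketch of the extraction, using the HPRE_CLUSTER_NUM_CAP row (register 0x313c, shift 20, mask GENMASK(3, 0)) as the example:

#include <stdio.h>
#include <stdint.h>

struct cap_info {
	uint32_t offset;	/* register offset (informational here) */
	uint32_t shift;
	uint32_t mask;
};

static uint32_t read_cap(uint32_t reg_val, const struct cap_info *cap)
{
	return (reg_val >> cap->shift) & cap->mask;
}

int main(void)
{
	const struct cap_info cluster_num = { 0x313c, 20, 0xf };
	uint32_t reg = 0x00400000;	/* pretend hardware value: 4 clusters */

	printf("clusters: %u\n", read_cap(reg, &cluster_num));	/* prints 4 */
	return 0;
}
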
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ad83c194d664..8b387de69d22 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -22,20 +22,17 @@
#define QM_VF_AEQ_INT_MASK 0x4
#define QM_VF_EQ_INT_SOURCE 0x8
#define QM_VF_EQ_INT_MASK 0xc
-#define QM_IRQ_NUM_V1 1
-#define QM_IRQ_NUM_PF_V2 4
-#define QM_IRQ_NUM_VF_V2 2
-#define QM_IRQ_NUM_VF_V3 3
-#define QM_EQ_EVENT_IRQ_VECTOR 0
-#define QM_AEQ_EVENT_IRQ_VECTOR 1
-#define QM_CMD_EVENT_IRQ_VECTOR 2
-#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
+#define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_SHIFT 16
+#define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_CMD_DATA_SHIFT 32
#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
+#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@@ -77,6 +74,9 @@
#define QM_EQ_OVERFLOW 1
#define QM_CQE_ERROR 2
+#define QM_XQ_DEPTH_SHIFT 16
+#define QM_XQ_DEPTH_MASK GENMASK(15, 0)
+
#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
#define QM_DOORBELL_CMD_EQ 2
@@ -86,11 +86,7 @@
#define QM_DB_CMD_SHIFT_V1 16
#define QM_DB_INDEX_SHIFT_V1 32
#define QM_DB_PRIORITY_SHIFT_V1 48
-#define QM_QUE_ISO_CFG_V 0x0030
#define QM_PAGE_SIZE 0x0034
-#define QM_QUE_ISO_EN 0x100154
-#define QM_CAPBILITY 0x100158
-#define QM_QP_NUN_MASK GENMASK(10, 0)
#define QM_QP_DB_INTERVAL 0x10000
#define QM_MEM_START_INIT 0x100040
@@ -126,7 +122,6 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
@@ -144,8 +139,10 @@
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
-#define QM_RAS_MSI_INT_SEL 0x1040f4
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
+#define QM_ECC_MBIT BIT(2)
+#define QM_DB_TIMEOUT BIT(10)
+#define QM_OF_FIFO_OF BIT(11)
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -205,6 +202,8 @@
#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208
+#define QM_FUNC_CAPS_REG 0x3100
+#define QM_CAPBILITY_VERSION GENMASK(7, 0)
#define PCI_BAR_2 2
#define PCI_BAR_4 4
@@ -221,7 +220,6 @@
#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
-#define QM_EQ_DEPTH (1024 * 2)
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
@@ -270,8 +268,8 @@
((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
-#define QM_MK_CQC_DW3_V2(cqe_sz) \
- ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
+#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
+ ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_SQC_W13(priority, orders, alg_type) \
(((priority) << QM_SQ_PRIORITY_SHIFT) | \
@@ -284,8 +282,8 @@
((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define QM_MK_SQC_DW3_V2(sqe_sz) \
- ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
+#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
+ ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
#define INIT_QC_COMMON(qc, base, pasid) do { \
(qc)->head = 0; \
@@ -329,6 +327,48 @@ enum qm_mb_cmd {
QM_VF_GET_QOS,
};
+enum qm_basic_type {
+ QM_TOTAL_QP_NUM_CAP = 0x0,
+ QM_FUNC_MAX_QP_CAP,
+ QM_XEQ_DEPTH_CAP,
+ QM_QP_DEPTH_CAP,
+ QM_EQ_IRQ_TYPE_CAP,
+ QM_AEQ_IRQ_TYPE_CAP,
+ QM_ABN_IRQ_TYPE_CAP,
+ QM_PF2VF_IRQ_TYPE_CAP,
+ QM_PF_IRQ_NUM_CAP,
+ QM_VF_IRQ_NUM_CAP,
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+ {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
+ {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
+};
+
+static const struct hisi_qm_cap_info qm_basic_info[] = {
+ {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800},
+ {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
+ {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
+ {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
+ {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
+ {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -421,15 +461,11 @@ struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
u8 cmd, u16 index, u8 priority);
- u32 (*get_irq_num)(struct hisi_qm *qm);
int (*debug_init)(struct hisi_qm *qm);
- void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
+ void (*hw_error_init)(struct hisi_qm *qm);
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
- int (*stop_qp)(struct hisi_qp *qp);
int (*set_msi)(struct hisi_qm *qm, bool set);
- int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
- int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};
struct qm_dfx_item {
@@ -533,6 +569,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
{50100, 100000, 19}
};
+static void qm_irqs_unregister(struct hisi_qm *qm);
+
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
enum qm_state curr = atomic_read(&qm->status.flags);
@@ -623,22 +661,17 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
}
/* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
+static bool qm_check_dev_error(struct hisi_qm *qm)
{
u32 val, dev_val;
if (qm->fun_type == QM_HW_VF)
- return 0;
+ return false;
- val = qm_get_hw_error_status(qm);
- dev_val = qm_get_dev_err_status(qm);
+ val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
+ dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
- if (qm->ver < QM_HW_V3)
- return (val & QM_ECC_MBIT) ||
- (dev_val & qm->err_info.ecc_2bits_mask);
-
- return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
- (dev_val & (~qm->err_info.dev_ce_mask));
+ return val || dev_val;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -728,8 +761,12 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
+ int ret;
+ u32 val;
+
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
+ ret = -EBUSY;
goto mb_busy;
}
@@ -737,6 +774,14 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
+ ret = -ETIMEDOUT;
+ goto mb_busy;
+ }
+
+ val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
+ if (val & QM_MB_STATUS_MASK) {
+ dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+ ret = -EIO;
goto mb_busy;
}
@@ -744,7 +789,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
mb_busy:
atomic64_inc(&qm->debug.dfx.mb_err_cnt);
- return -EBUSY;
+ return ret;
}
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
@@ -828,25 +873,52 @@ static int qm_dev_mem_reset(struct hisi_qm *qm)
POLL_TIMEOUT);
}
-static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
+/**
+ * hisi_qm_get_hw_info() - Get device information.
+ * @qm: The qm from which to get the information.
+ * @info_table: Array for storing device information.
+ * @index: Index in info_table.
+ * @is_read: Whether to read from the hardware register; if false, the per-version default value is returned.
+ *
+ * This function returns device information the caller needs.
+ */
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read)
{
- return QM_IRQ_NUM_V1;
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return (val >> info_table[index].shift) & info_table[index].mask;
+ }
}
+EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
-static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
+static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ u16 *high_bits, enum qm_basic_type type)
{
- if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
- else
- return QM_IRQ_NUM_VF_V2;
+ u32 depth;
+
+ depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+ *low_bits = depth & QM_XQ_DEPTH_MASK;
+ *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
-static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+static u32 qm_get_irq_num(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
- return QM_IRQ_NUM_VF_V3;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}
static int qm_pm_get_sync(struct hisi_qm *qm)
@@ -854,7 +926,7 @@ static int qm_pm_get_sync(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return 0;
ret = pm_runtime_resume_and_get(dev);
@@ -870,7 +942,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_mark_last_busy(dev);
@@ -879,7 +951,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
static void qm_cq_head_update(struct hisi_qp *qp)
{
- if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
+ if (qp->qp_status.cq_head == qp->cq_depth - 1) {
qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
qp->qp_status.cq_head = 0;
} else {
@@ -911,6 +983,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
struct hisi_qm *qm = poll_data->qm;
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+ u16 eq_depth = qm->eq_depth;
int eqe_num = 0;
u16 cqn;
@@ -919,7 +992,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
poll_data->qp_finish_id[eqe_num] = cqn;
eqe_num++;
- if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
+ if (qm->status.eq_head == eq_depth - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
eqe = qm->eqe;
qm->status.eq_head = 0;
@@ -928,7 +1001,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
qm->status.eq_head++;
}
- if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+ if (eqe_num == (eq_depth >> 1) - 1)
break;
}
@@ -1068,6 +1141,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+ u16 aeq_depth = qm->aeq_depth;
u32 type, qp_id;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
@@ -1092,7 +1166,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
break;
}
- if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
+ if (qm->status.aeq_head == aeq_depth - 1) {
qm->status.aeqc_phase = !qm->status.aeqc_phase;
aeqe = qm->aeqe;
qm->status.aeq_head = 0;
@@ -1118,24 +1192,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-static void qm_irq_unregister(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
-
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->ver > QM_HW_V1) {
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
- }
-
- if (qm->ver > QM_HW_V2)
- free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
-}
-
static void qm_init_qp_status(struct hisi_qp *qp)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1151,7 +1207,7 @@ static void qm_init_prefetch(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
u32 page_type = 0x0;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
switch (PAGE_SIZE) {
@@ -1270,7 +1326,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
}
break;
case SHAPER_VFT:
- if (qm->ver >= QM_HW_V3) {
+ if (factor) {
tmp = factor->cir_b |
(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
@@ -1288,10 +1344,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
u32 fun_num, u32 base, u32 number)
{
- struct qm_shaper_factor *factor = &qm->factor[fun_num];
+ struct qm_shaper_factor *factor = NULL;
unsigned int val;
int ret;
+ if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ factor = &qm->factor[fun_num];
+
ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
val & BIT(0), POLL_PERIOD,
POLL_TIMEOUT);
@@ -1349,7 +1408,7 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
}
/* init default shaper qos val */
- if (qm->ver >= QM_HW_V3) {
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
ret = qm_shaper_init_vft(qm, fun_num);
if (ret)
goto back_sqc_cqc;
@@ -1357,11 +1416,9 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return 0;
back_sqc_cqc:
- for (i = SQC_VFT; i <= CQC_VFT; i++) {
- ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
- if (ret)
- return ret;
- }
+ for (i = SQC_VFT; i <= CQC_VFT; i++)
+ qm_set_vft_common(qm, i, fun_num, 0, 0);
+
return ret;
}
@@ -1857,39 +1914,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
kfree(ctx_addr);
}
-static int dump_show(struct hisi_qm *qm, void *info,
+static void dump_show(struct hisi_qm *qm, void *info,
unsigned int info_size, char *info_name)
{
struct device *dev = &qm->pdev->dev;
- u8 *info_buf, *info_curr = info;
+ u8 *info_curr = info;
u32 i;
#define BYTE_PER_DW 4
- info_buf = kzalloc(info_size, GFP_KERNEL);
- if (!info_buf)
- return -ENOMEM;
-
- for (i = 0; i < info_size; i++, info_curr++) {
- if (i % BYTE_PER_DW == 0)
- info_buf[i + 3UL] = *info_curr;
- else if (i % BYTE_PER_DW == 1)
- info_buf[i + 1UL] = *info_curr;
- else if (i % BYTE_PER_DW == 2)
- info_buf[i - 1] = *info_curr;
- else if (i % BYTE_PER_DW == 3)
- info_buf[i - 3] = *info_curr;
- }
-
dev_info(dev, "%s DUMP\n", info_name);
- for (i = 0; i < info_size; i += BYTE_PER_DW) {
+ for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
- info_buf[i], info_buf[i + 1UL],
- info_buf[i + 2UL], info_buf[i + 3UL]);
+ *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
}
-
- kfree(info_buf);
-
- return 0;
}
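
The rewritten dump_show() above drops the intermediate byte-swizzled buffer and prints each 32-bit word of the context directly, most-significant byte first. A minimal standalone sketch of the same walk, with an illustrative buffer:

	#include <stdio.h>
	#include <stdint.h>

	#define BYTE_PER_DW 4

	/* Print a context structure one 32-bit word per line, most-significant
	 * byte first, mirroring the rewritten dump_show() walk. */
	static void dump_words(const uint8_t *info, unsigned int info_size)
	{
		unsigned int i;

		for (i = 0; i < info_size; i += BYTE_PER_DW, info += BYTE_PER_DW)
			printf("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
			       info[3], info[2], info[1], info[0]);
	}

	int main(void)
	{
		uint8_t sqc[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

		dump_words(sqc, sizeof(sqc));	/* DW0: 4433 2211, DW1: 8877 6655 */
		return 0;
	}
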
static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
@@ -1929,23 +1966,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
if (qm->sqc) {
sqc_curr = qm->sqc + qp_id;
- ret = dump_show(qm, sqc_curr, sizeof(*sqc),
- "SOFT SQC");
- if (ret)
- dev_info(dev, "Show soft sqc failed!\n");
+ dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
- if (ret)
- dev_info(dev, "Show hw sqc failed!\n");
+ dump_show(qm, sqc, sizeof(*sqc), "SQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
- return ret;
+ return 0;
}
static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
@@ -1975,23 +2007,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
if (qm->cqc) {
cqc_curr = qm->cqc + qp_id;
- ret = dump_show(qm, cqc_curr, sizeof(*cqc),
- "SOFT CQC");
- if (ret)
- dev_info(dev, "Show soft cqc failed!\n");
+ dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
- if (ret)
- dev_info(dev, "Show hw cqc failed!\n");
+ dump_show(qm, cqc, sizeof(*cqc), "CQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
- return ret;
+ return 0;
}
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
@@ -2015,9 +2042,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
if (ret)
goto err_free_ctx;
- ret = dump_show(qm, xeqc, size, name);
- if (ret)
- dev_info(dev, "Show hw %s failed!\n", name);
+ dump_show(qm, xeqc, size, name);
err_free_ctx:
qm_ctx_free(qm, size, xeqc, &xeqc_dma);
@@ -2025,7 +2050,7 @@ err_free_ctx:
}
static int q_dump_param_parse(struct hisi_qm *qm, char *s,
- u32 *e_id, u32 *q_id)
+ u32 *e_id, u32 *q_id, u16 q_depth)
{
struct device *dev = &qm->pdev->dev;
unsigned int qp_num = qm->qp_num;
@@ -2051,8 +2076,8 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
}
ret = kstrtou32(presult, 0, e_id);
- if (ret || *e_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+ if (ret || *e_id >= q_depth) {
+ dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
return -EINVAL;
}
@@ -2066,54 +2091,49 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
static int qm_sq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
+	u16 sq_depth = qm->qp_array->sq_depth;
void *sqe, *sqe_curr;
struct hisi_qp *qp;
u32 qp_id, sqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
if (ret)
return ret;
- sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+ sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
if (!sqe)
return -ENOMEM;
qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+ memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
qm->debug.sqe_mask_len);
- ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
- if (ret)
- dev_info(dev, "Show sqe failed!\n");
+ dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
kfree(sqe);
- return ret;
+ return 0;
}
static int qm_cq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
struct qm_cqe *cqe_curr;
struct hisi_qp *qp;
u32 qp_id, cqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
if (ret)
return ret;
qp = &qm->qp_array[qp_id];
cqe_curr = qp->cqe + cqe_id;
- ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- if (ret)
- dev_info(dev, "Show cqe failed!\n");
+ dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- return ret;
+ return 0;
}
static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
@@ -2131,11 +2151,11 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
if (ret)
return -EINVAL;
- if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
- dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+ if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+ dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
return -EINVAL;
- } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+ } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+ dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
return -EINVAL;
}
@@ -2150,9 +2170,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
goto err_unlock;
}
- ret = dump_show(qm, xeqe, size, name);
- if (ret)
- dev_info(dev, "Show %s failed!\n", name);
+ dump_show(qm, xeqe, size, name);
err_unlock:
up_read(&qm->qps_lock);
@@ -2245,8 +2263,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
return ret;
/* Judge if the instance is being reset. */
- if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
- return 0;
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+ ret = 0;
+ goto put_dfx_access;
+ }
if (count > QM_DBG_WRITE_LEN) {
ret = -ENOSPC;
@@ -2300,58 +2320,65 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
file->debug = &qm->debug;
}
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- qm->error_mask = ce | nfe | fe;
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
/* clear QM hw residual error source */
- writel(QM_ABNORMAL_INT_SOURCE_CLR,
- qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
/* disable close master ooo when hardware error happened */
writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
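
Both uninit paths now mask only the bits in qm->error_mask via a read-modify-write of QM_ABNORMAL_INT_MASK, rather than writing the fixed QM_ABNORMAL_INT_MASK_VALUE. A small sketch of the pattern against a simulated register:

	#include <stdint.h>
	#include <stdio.h>

	/* Simulated QM_ABNORMAL_INT_MASK register: set bits mask (disable) the
	 * corresponding error interrupts. */
	static uint32_t int_mask_reg;

	static void hw_error_uninit(uint32_t error_mask)
	{
		/* Read-modify-write: mask only this function's error sources and
		 * leave bits owned by other logic untouched, instead of writing a
		 * fixed all-ones value as the old code did. */
		int_mask_reg |= error_mask;
	}

	int main(void)
	{
		int_mask_reg = 0x100;		/* some unrelated bit already masked */
		hw_error_uninit(0x7f);
		printf("mask = 0x%x\n", int_mask_reg);	/* 0x17f */
		return 0;
	}
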
@@ -2396,7 +2423,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp, val;
+ u32 error_status, tmp;
/* read err sts */
tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -2407,17 +2434,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
- /* ce error does not need to be reset */
- if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
- writel(error_status, qm->io_base +
- QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe,
- qm->io_base + QM_RAS_NFE_ENABLE);
- return ACC_ERR_RECOVERED;
- }
+ if (error_status & qm->err_info.qm_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_NEED_RESET;
+ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
}
return ACC_ERR_RECOVERED;
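
Error classification is now table-driven: anything flagged in err_info.qm_reset_mask forces a reset, everything else is cleared and treated as recovered, replacing the hard-coded QM_DB_RANDOM_INVALID | QM_BASE_CE special case. A standalone sketch of the decision:

	#include <stdint.h>
	#include <stdio.h>

	enum acc_err_result { ACC_ERR_NONE, ACC_ERR_RECOVERED, ACC_ERR_NEED_RESET };

	/* Classify a hardware error status word against a per-device reset
	 * mask supplied by the driver's error-info table. */
	static enum acc_err_result classify_error(uint32_t error_status,
						  uint32_t reset_mask)
	{
		if (!error_status)
			return ACC_ERR_NONE;
		if (error_status & reset_mask)
			return ACC_ERR_NEED_RESET;
		return ACC_ERR_RECOVERED;	/* clear the source and carry on */
	}

	int main(void)
	{
		printf("%d\n", classify_error(0x4, 0x6));	/* needs reset: 2 */
		printf("%d\n", classify_error(0x1, 0x6));	/* recoverable: 1 */
		return 0;
	}
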
@@ -2493,7 +2514,7 @@ static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
u64 val;
u32 i;
- if (!qm->vfs_num || qm->ver < QM_HW_V3)
+ if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return 0;
while (true) {
@@ -2756,7 +2777,6 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
- .get_irq_num = qm_get_irq_num_v1,
.hw_error_init = qm_hw_error_init_v1,
.set_msi = qm_set_msi,
};
@@ -2764,7 +2784,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v2,
.hw_error_init = qm_hw_error_init_v2,
.hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
@@ -2774,14 +2793,10 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v3,
.hw_error_init = qm_hw_error_init_v3,
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
- .stop_qp = qm_stop_qp,
.set_msi = qm_set_msi_v3,
- .ping_all_vfs = qm_ping_all_vfs,
- .ping_pf = qm_ping_pf,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2789,7 +2804,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
+ if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
return NULL;
return qp->sqe + sq_tail * qp->qm->sqe_size;
@@ -2830,7 +2845,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp = &qm->qp_array[qp_id];
hisi_qm_unset_hw_reset(qp);
- memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
qp->event_cb = NULL;
qp->req_cb = NULL;
@@ -2911,9 +2926,9 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
- sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
} else {
- sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
sqc->w8 = 0; /* rand_qc */
}
sqc->cq_num = cpu_to_le16(qp_id);
@@ -2954,9 +2969,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
if (ver == QM_HW_V1) {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
QM_QC_CQE_SIZE));
- cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
} else {
- cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
cqc->w8 = 0; /* rand_qc */
}
cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
@@ -3043,13 +3058,14 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
{
int qp_used = atomic_read(&qp->qp_status.used);
u16 cur_tail = qp->qp_status.sq_tail;
- u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+ u16 sq_depth = qp->sq_depth;
+ u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
struct hisi_qm *qm = qp->qm;
u16 pos;
int i;
for (i = 0; i < qp_used; i++) {
- pos = (i + cur_head) % QM_Q_DEPTH;
+ pos = (i + cur_head) % sq_depth;
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
atomic_dec(&qp->qp_status.used);
}
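
qp_stop_fail_cb() recovers the oldest in-flight slot from the tail pointer and the outstanding count, now modulo the per-queue sq_depth instead of the fixed QM_Q_DEPTH. The index math in isolation:

	#include <stdio.h>

	/* Recover the ring head from tail and in-flight count: walk back
	 * `used` slots modulo the (now variable) queue depth. */
	static unsigned ring_head(unsigned tail, unsigned used, unsigned depth)
	{
		return (tail + depth - used) % depth;
	}

	int main(void)
	{
		/* tail wrapped to 2 with 5 requests outstanding in a 1024-deep
		 * ring: the oldest un-acked slot is 1021. */
		printf("%u\n", ring_head(2, 5, 1024));
		return 0;
	}
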
@@ -3078,8 +3094,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
return 0;
/* Kunpeng930 supports drain qp by device */
- if (qm->ops->stop_qp) {
- ret = qm->ops->stop_qp(qp);
+ if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+ ret = qm_stop_qp(qp);
if (ret)
dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
return ret;
@@ -3197,7 +3213,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
+ u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
void *sqe = qm_get_avail_sqe(qp);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
@@ -3286,7 +3302,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
{
struct hisi_qp *qp = q->priv;
- hisi_qm_cache_wb(qp->qm);
hisi_qm_release_qp(qp);
}
@@ -3310,7 +3325,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
if (qm->ver == QM_HW_V1) {
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
- } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
+ } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
@@ -3387,6 +3402,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
unsigned long arg)
{
struct hisi_qp *qp = q->priv;
+ struct hisi_qp_info qp_info;
struct hisi_qp_ctx qp_ctx;
if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
@@ -3403,11 +3419,25 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
if (copy_to_user((void __user *)arg, &qp_ctx,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- } else {
- return -EINVAL;
+
+ return 0;
+ } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
+ if (copy_from_user(&qp_info, (void __user *)arg,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ qp_info.sqe_size = qp->qm->sqe_size;
+ qp_info.sq_depth = qp->sq_depth;
+ qp_info.cq_depth = qp->cq_depth;
+
+ if (copy_to_user((void __user *)arg, &qp_info,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ return 0;
}
- return 0;
+ return -EINVAL;
}
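
The new UACCE_CMD_QM_SET_QP_INFO branch round-trips a struct hisi_qp_info so userspace can discover per-queue geometry. A hypothetical userspace sketch; the struct layout and request number here are stand-ins for the real uacce UAPI definitions:

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	/* Hypothetical mirror of the kernel's struct hisi_qp_info; the real
	 * definition lives in the uacce UAPI headers. */
	struct hisi_qp_info {
		uint32_t sqe_size;
		uint16_t sq_depth;
		uint16_t cq_depth;
		uint64_t reserved;
	};

	/* Query queue geometry from an open uacce queue fd.  The kernel fills
	 * in sqe_size and both depths, as the new ioctl branch shows. */
	static int query_qp_info(int q_fd, unsigned long set_qp_info_cmd)
	{
		struct hisi_qp_info info = { 0 };

		if (ioctl(q_fd, set_qp_info_cmd, &info))
			return -1;

		printf("sqe_size=%u sq_depth=%u cq_depth=%u\n",
		       info.sqe_size, info.sq_depth, info.cq_depth);
		return 0;
	}

	int main(void)
	{
		/* Placeholder fd and request: in real use these come from the
		 * uacce queue open flow and UAPI headers. */
		return query_qp_info(-1, 0) ? 1 : 0;
	}
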
static const struct uacce_ops uacce_qm_ops = {
@@ -3427,6 +3457,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
struct uacce_device *uacce;
unsigned long mmio_page_nr;
unsigned long dus_page_nr;
+ u16 sq_depth, cq_depth;
struct uacce_interface interface = {
.flags = UACCE_DEV_SVA,
.ops = &uacce_qm_ops,
@@ -3453,7 +3484,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->is_vf = pdev->is_virtfn;
uacce->priv = qm;
- uacce->algs = qm->algs;
if (qm->ver == QM_HW_V1)
uacce->api_ver = HISI_QM_API_VER_BASE;
@@ -3464,15 +3494,17 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
- else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
+ else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
else
mmio_page_nr = qm->db_interval / PAGE_SIZE;
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* Add one more page for device or qp status */
- dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
+ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
+ sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
PAGE_SHIFT;
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
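
The DUS region is sized from the capability-derived depths: the SQE and CQE footprints rounded up to whole pages, plus one page for device or qp status. A worked sketch of the arithmetic, assuming the 16-byte struct qm_cqe:

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	/* Pages needed for one queue's DUS region: sq_depth SQEs plus cq_depth
	 * CQEs, rounded up to whole pages, plus one page for status. */
	static unsigned long dus_pages(size_t sqe_size, unsigned sq_depth,
				       unsigned cq_depth)
	{
		size_t cqe_size = 16;	/* sizeof(struct qm_cqe) */

		return (PAGE_SIZE - 1 + sqe_size * sq_depth +
			cqe_size * cq_depth + PAGE_SIZE) / PAGE_SIZE;
	}

	int main(void)
	{
		/* e.g. 128-byte SQEs with 1024-deep queues -> 37 pages */
		printf("%lu\n", dus_pages(128, 1024, 1024));
		return 0;
	}
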
@@ -3577,10 +3609,11 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
kfree(qm->qp_array);
}
-static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
+ u16 sq_depth, u16 cq_depth)
{
struct device *dev = &qm->pdev->dev;
- size_t off = qm->sqe_size * QM_Q_DEPTH;
+ size_t off = qm->sqe_size * sq_depth;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -3600,6 +3633,8 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
qp->cqe = qp->qdma.va + off;
qp->cqe_dma = qp->qdma.dma + off;
qp->qdma.size = dma_size;
+ qp->sq_depth = sq_depth;
+ qp->cq_depth = cq_depth;
qp->qm = qm;
qp->qp_id = id;
@@ -3626,7 +3661,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
qm->misc_ctl = false;
- if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
}
@@ -3636,7 +3671,7 @@ static void qm_cmd_uninit(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
val = readl(qm->io_base + QM_IFC_INT_MASK);
@@ -3648,7 +3683,7 @@ static void qm_cmd_init(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
/* Clear communication interrupt source */
@@ -3664,7 +3699,7 @@ static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
iounmap(qm->io_base);
@@ -3714,7 +3749,9 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
}
idr_destroy(&qm->qp_idr);
- kfree(qm->factor);
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
}
/**
@@ -3740,7 +3777,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
if (qm->use_sva) {
uacce_remove(qm->uacce);
@@ -3841,7 +3878,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
- eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
DMA_TO_DEVICE);
@@ -3870,7 +3907,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
DMA_TO_DEVICE);
@@ -4136,14 +4173,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
static void qm_hw_error_init(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
-
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
- qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
+ qm->ops->hw_error_init(qm);
}
static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -4497,12 +4532,10 @@ static int qm_vf_read_qos(struct hisi_qm *qm)
qm->mb_qos = 0;
/* vf ping pf to get function qos */
- if (qm->ops->ping_pf) {
- ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
- if (ret) {
- pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
- return ret;
- }
+ ret = qm_ping_pf(qm, QM_VF_GET_QOS);
+ if (ret) {
+ pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+ return ret;
}
while (true) {
@@ -4674,14 +4707,14 @@ static const struct file_operations qm_algqos_fops = {
* hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
* @qm: The qm for which we want to add debugfs files.
*
- * Create function qos debugfs files.
+ * Create function qos debugfs files, VF ping PF to get function qos.
*/
static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
qm, &qm_algqos_fops);
- else
+ else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
qm, &qm_algqos_fops);
}
@@ -4729,7 +4762,7 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
&qm_atomic64_ops);
}
- if (qm->ver >= QM_HW_V3)
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
hisi_qm_set_algqos_init(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
@@ -4768,6 +4801,14 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+{
+ int i;
+
+ for (i = 1; i <= total_func; i++)
+ qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+}
+
/**
* hisi_qm_sriov_enable() - enable virtual functions
* @pdev: the PCIe device
@@ -4794,7 +4835,17 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
- num_vfs = min_t(int, max_vfs, total_vfs);
+ if (max_vfs > total_vfs) {
+ pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
+ ret = -ERANGE;
+ goto err_put_sync;
+ }
+
+ num_vfs = max_vfs;
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ hisi_qm_init_vf_qos(qm, num_vfs);
+
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
@@ -4830,7 +4881,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
int ret;
if (pci_vfs_assigned(pdev)) {
@@ -4845,8 +4895,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
- /* clear vf function shaper configure array */
- memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
ret = qm_clear_vft_config(qm);
if (ret)
return ret;
@@ -4891,17 +4940,11 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
if (qm->err_ini->log_dev_hw_err)
qm->err_ini->log_dev_hw_err(qm, err_sts);
- /* ce error does not need to be reset */
- if ((err_sts | qm->err_info.dev_ce_mask) ==
- qm->err_info.dev_ce_mask) {
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm,
- err_sts);
+ if (err_sts & qm->err_info.dev_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_RECOVERED;
- }
-
- return ACC_ERR_NEED_RESET;
+ if (qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
}
return ACC_ERR_RECOVERED;
@@ -5070,8 +5113,8 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
return 0;
/* Kunpeng930 supports to notify VFs to stop before PF reset */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
} else {
@@ -5262,8 +5305,8 @@ static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
}
/* Kunpeng930 supports to notify VFs to start after PF reset. */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
} else {
@@ -5466,8 +5509,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
if (pdev->is_virtfn)
return PCI_ERS_RESULT_RECOVERED;
- pci_aer_clear_nonfatal_status(pdev);
-
/* reset pcie device controller */
ret = qm_controller_reset(qm);
if (ret) {
@@ -5599,51 +5640,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int qm_irq_register(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, 0, qm->dev_name, qm);
- if (ret)
- return ret;
-
- if (qm->ver > QM_HW_V1) {
- ret = request_threaded_irq(pci_irq_vector(pdev,
- QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, qm_aeq_thread,
- 0, qm->dev_name, qm);
- if (ret)
- goto err_aeq_irq;
-
- if (qm->fun_type == QM_HW_PF) {
- ret = request_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_abonormal_irq;
- }
- }
-
- if (qm->ver > QM_HW_V2) {
- ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
- qm_mb_cmd_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_mb_cmd_irq;
- }
-
- return 0;
-
-err_mb_cmd_irq:
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-err_abonormal_irq:
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- return ret;
-}
/**
* hisi_qm_dev_shutdown() - Shutdown device.
@@ -5711,7 +5707,7 @@ err_prepare:
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
pci_save_state(pdev);
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
}
@@ -5729,7 +5725,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
cmd = QM_VF_START_FAIL;
}
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
@@ -5924,21 +5920,193 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
+static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
+ qm_aeq_thread, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_irqs_unregister(struct hisi_qm *qm)
+{
+ qm_unregister_mb_cmd_irq(qm);
+ qm_unregister_abnormal_irq(qm);
+ qm_unregister_aeq_irq(qm);
+ qm_unregister_eq_irq(qm);
+}
+
+static int qm_irqs_register(struct hisi_qm *qm)
+{
+ int ret;
+
+ ret = qm_register_eq_irq(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_register_aeq_irq(qm);
+ if (ret)
+ goto free_eq_irq;
+
+ ret = qm_register_abnormal_irq(qm);
+ if (ret)
+ goto free_aeq_irq;
+
+ ret = qm_register_mb_cmd_irq(qm);
+ if (ret)
+ goto free_abnormal_irq;
+
+ return 0;
+
+free_abnormal_irq:
+ qm_unregister_abnormal_irq(qm);
+free_aeq_irq:
+ qm_unregister_aeq_irq(qm);
+free_eq_irq:
+ qm_unregister_eq_irq(qm);
+ return ret;
+}
+
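
Each of the four register/unregister pairs above decodes one capability word: the bits above QM_IRQ_TYPE_SHIFT say whether the interrupt exists on this hardware revision, and the low bits select the MSI vector. A standalone sketch with illustrative mask and shift values:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative field layout for one IRQ capability word: low bits hold
	 * the MSI vector index, the bits above QM_IRQ_TYPE_SHIFT say whether
	 * the interrupt is wired on this hardware revision. */
	#define QM_IRQ_VECTOR_MASK	0xff
	#define QM_IRQ_TYPE_MASK	0xf
	#define QM_IRQ_TYPE_SHIFT	16

	/* Returns the vector to request, or -1 when the IRQ is not present. */
	static int decode_irq_cap(uint32_t val)
	{
		if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
			return -1;
		return val & QM_IRQ_VECTOR_MASK;
	}

	int main(void)
	{
		printf("%d\n", decode_irq_cap(0x00010003));	/* vector 3 */
		printf("%d\n", decode_irq_cap(0x00000003));	/* absent: -1 */
		return 0;
	}
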
static int qm_get_qp_num(struct hisi_qm *qm)
{
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = QM_QNUM_V1;
- else if (qm->ver == QM_HW_V2)
- qm->ctrl_qp_num = QM_QNUM_V2;
- else
- qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
- QM_QP_NUN_MASK;
+ bool is_db_isolation;
- if (qm->use_db_isolation)
- qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
- QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
- else
- qm->max_qp_num = qm->ctrl_qp_num;
+ /* The PF assigns qp_num to each VF; from v2 the VF can read it via the vft. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver != QM_HW_V1)
+ /* v2 starts to support getting the vft by mailbox */
+ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+
+ return 0;
+ }
+
+ is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+ qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
+ qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ QM_FUNC_MAX_QP_CAP, is_db_isolation);
/* check if qp number is valid */
if (qm->qp_num > qm->max_qp_num) {
@@ -5950,6 +6118,39 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
+static void qm_get_hw_caps(struct hisi_qm *qm)
+{
+ const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+ qm_cap_info_pf : qm_cap_info_vf;
+ u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
+ ARRAY_SIZE(qm_cap_info_vf);
+ u32 val, i;
+
+ /* The doorbell isolation register is an independent register. */
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
+ if (val)
+ set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+
+ if (qm->ver >= QM_HW_V3) {
+ val = readl(qm->io_base + QM_FUNC_CAPS_REG);
+ qm->cap_ver = val & QM_CAPBILITY_VERSION;
+ }
+
+ /* Get PF/VF common capability */
+ for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
+ if (val)
+ set_bit(qm_cap_info_comm[i].type, &qm->caps);
+ }
+
+ /* Get PF/VF different capability */
+ for (i = 0; i < size; i++) {
+ val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
+ if (val)
+ set_bit(cap_info[i].type, &qm->caps);
+ }
+}
+
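
qm_get_hw_caps() walks capability tables whose entries either read a field out of the capability registers (when cap_ver is set) or fall back to a per-version default. A hypothetical sketch of one such lookup; the entry layout is an assumption, not the driver's real struct hisi_qm_cap_info:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical shape of one capability-table entry: where the field
	 * lives in the capability register block, and what to assume on
	 * parts that predate the capability registers. */
	struct cap_info {
		unsigned type;		/* bit to set in qm->caps */
		uint32_t shift, mask;
		uint32_t fallback;	/* value used when !cap_ver */
	};

	static uint32_t reg_read(uint32_t off) { (void)off; return 0x1; } /* stub */

	/* Read a capability: from hardware when the capability-register
	 * scheme is supported, otherwise from the table's fallback. */
	static uint32_t get_hw_info(const struct cap_info *c, uint32_t reg_off,
				    bool cap_ver)
	{
		if (cap_ver)
			return (reg_read(reg_off) >> c->shift) & c->mask;
		return c->fallback;
	}

	int main(void)
	{
		struct cap_info db_iso = { .type = 0, .shift = 0, .mask = 0x1,
					   .fallback = 0 };

		printf("%u\n", get_hw_info(&db_iso, 0x100, true));	/* 1 */
		return 0;
	}
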
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5969,16 +6170,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
- if (qm->ver > QM_HW_V2) {
- if (qm->fun_type == QM_HW_PF)
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_EN) & BIT(0);
- else
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_CFG_V) & BIT(0);
- }
-
- if (qm->use_db_isolation) {
+ qm_get_hw_caps(qm);
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
qm->db_interval = QM_QP_DB_INTERVAL;
qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
qm->db_io_base = ioremap(qm->db_phys_base,
@@ -5993,16 +6186,14 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
- if (qm->fun_type == QM_HW_PF) {
- ret = qm_get_qp_num(qm);
- if (ret)
- goto err_db_ioremap;
- }
+ ret = qm_get_qp_num(qm);
+ if (ret)
+ goto err_db_ioremap;
return 0;
err_db_ioremap:
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
err_ioremap:
iounmap(qm->io_base);
@@ -6033,11 +6224,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
goto err_get_pci_res;
pci_set_master(pdev);
- if (!qm->ops->get_irq_num) {
- ret = -EOPNOTSUPP;
- goto err_get_pci_res;
- }
- num_vec = qm->ops->get_irq_num(qm);
+ num_vec = qm_get_irq_num(qm);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -6080,6 +6267,7 @@ static int hisi_qm_init_work(struct hisi_qm *qm)
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
+ u16 sq_depth, cq_depth;
size_t qp_dma_size;
int i, ret;
@@ -6093,13 +6281,14 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
return -ENOMEM;
}
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* one more page for device or qp statuses */
- qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
for (i = 0; i < qm->qp_num; i++) {
qm->poll_data[i].qm = qm;
- ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
if (ret)
goto err_init_qp_mem;
@@ -6116,15 +6305,18 @@ err_init_qp_mem:
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret, total_func, i;
+ int ret, total_func;
size_t off = 0;
- total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
- qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
- if (!qm->factor)
- return -ENOMEM;
- for (i = 0; i < total_func; i++)
- qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+ total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+ qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ if (!qm->factor)
+ return -ENOMEM;
+
+ /* Only the PF value needs to be initialized */
+ qm->factor[0].func_qos = QM_QOS_MAX_VAL;
+ }
#define QM_INIT_BUF(qm, type, num) do { \
(qm)->type = ((qm)->qdma.va + (off)); \
@@ -6133,20 +6325,21 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
} while (0)
idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
GFP_ATOMIC);
dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
if (!qm->qdma.va) {
- ret = -ENOMEM;
- goto err_alloc_qdma;
+ ret = -ENOMEM;
+ goto err_destroy_idr;
}
- QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, eqe, qm->eq_depth);
+ QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
@@ -6158,8 +6351,10 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
err_alloc_qp_array:
dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-err_alloc_qdma:
- kfree(qm->factor);
+err_destroy_idr:
+ idr_destroy(&qm->qp_idr);
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
return ret;
}
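
hisi_qm_memory_init() carves the eqe/aeqe/sqc/cqc arrays out of one coherent allocation by running an offset through QM_INIT_BUF(). A toy standalone version of the layout computation (element sizes are illustrative):

	#include <stdio.h>
	#include <stddef.h>

	#define QMC_ALIGN(sz)	(((sz) + 31) & ~31UL)	/* illustrative 32-byte align */

	/* Carve several queue-context arrays out of one DMA buffer by running
	 * offset, the way QM_INIT_BUF() lays out eqe/aeqe/sqc/cqc. */
	int main(void)
	{
		size_t eqe = 4, aeqe = 16, qp_num = 8;	/* toy element sizes/counts */
		size_t eq_depth = 1024, aeq_depth = 1024, off = 0;

		size_t eqe_off = off;  off += QMC_ALIGN(eqe * eq_depth);
		size_t aeqe_off = off; off += QMC_ALIGN(aeqe * aeq_depth);
		size_t sqc_off = off;  off += QMC_ALIGN(32 * qp_num);
		size_t cqc_off = off;  off += QMC_ALIGN(32 * qp_num);

		printf("eqe@%zu aeqe@%zu sqc@%zu cqc@%zu total=%zu\n",
		       eqe_off, aeqe_off, sqc_off, cqc_off, off);
		return 0;
	}
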
@@ -6202,17 +6397,10 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
return ret;
- ret = qm_irq_register(qm);
+ ret = qm_irqs_register(qm);
if (ret)
goto err_pci_init;
- if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_irq_register;
- }
-
if (qm->fun_type == QM_HW_PF) {
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
@@ -6251,7 +6439,7 @@ err_alloc_uacce:
qm->uacce = NULL;
}
err_irq_register:
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
err_pci_init:
hisi_qm_pci_uninit(qm);
return ret;
@@ -6302,7 +6490,7 @@ void hisi_qm_pm_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
@@ -6321,7 +6509,7 @@ void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_get_noresume(dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index d2a0bc93e752..3e57fc04b377 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -17,6 +17,7 @@ struct sec_alg_res {
dma_addr_t a_ivin_dma;
u8 *out_mac;
dma_addr_t out_mac_dma;
+ u16 depth;
};
/* Cipher request of SEC private */
@@ -115,9 +116,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req *req_list[QM_Q_DEPTH];
+ struct sec_req **req_list;
struct idr req_idr;
- struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
struct list_head backlog;
@@ -191,8 +192,37 @@ struct sec_dev {
bool iommu_used;
};
+enum sec_cap_type {
+ SEC_QM_NFE_MASK_CAP = 0x0,
+ SEC_QM_RESET_MASK_CAP,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP,
+ SEC_QM_CE_MASK_CAP,
+ SEC_NFE_MASK_CAP,
+ SEC_RESET_MASK_CAP,
+ SEC_OOO_SHUTDOWN_MASK_CAP,
+ SEC_CE_MASK_CAP,
+ SEC_CLUSTER_NUM_CAP,
+ SEC_CORE_TYPE_NUM_CAP,
+ SEC_CORE_NUM_CAP,
+ SEC_CORES_PER_CLUSTER_NUM_CAP,
+ SEC_CORE_ENABLE_BITMAP,
+ SEC_DRV_ALG_BITMAP_LOW,
+ SEC_DRV_ALG_BITMAP_HIGH,
+ SEC_DEV_ALG_BITMAP_LOW,
+ SEC_DEV_ALG_BITMAP_HIGH,
+ SEC_CORE1_ALG_BITMAP_LOW,
+ SEC_CORE1_ALG_BITMAP_HIGH,
+ SEC_CORE2_ALG_BITMAP_LOW,
+ SEC_CORE2_ALG_BITMAP_HIGH,
+ SEC_CORE3_ALG_BITMAP_LOW,
+ SEC_CORE3_ALG_BITMAP_HIGH,
+ SEC_CORE4_ALG_BITMAP_LOW,
+ SEC_CORE4_ALG_BITMAP_HIGH,
+};
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif
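
sec_get_alg_bitmap(), declared above, presumably glues the HIGH and LOW capability words into one 64-bit algorithm bitmap. A minimal sketch under that assumption, with a stub in place of the real capability accessor:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t cap_read(unsigned idx) { return idx; }	/* stand-in */

	/* Assemble a 64-bit algorithm bitmap from two 32-bit capability
	 * fields read separately, as the high/low pair of enum values
	 * above suggests. */
	static uint64_t get_alg_bitmap(unsigned high_idx, unsigned low_idx)
	{
		uint64_t high = cap_read(high_idx);
		uint64_t low = cap_read(low_idx);

		return (high << 32) | low;
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)get_alg_bitmap(2, 1));
		return 0;
	}
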
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 77c9f13cf69a..84ae8ddd1a13 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -59,14 +59,14 @@
#define SEC_ICV_MASK 0x000E
#define SEC_SQE_LEN_RATE_MASK 0x3
-#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
#define SEC_MAX_CCM_AAD_LEN 65279
-#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
@@ -74,11 +74,11 @@
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
-#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
-#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
- SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
-#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
- SEC_PBUF_LEFT_SZ)
+#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \
+ SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
+ SEC_PBUF_LEFT_SZ(depth))
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
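
The pbuf macros are now parameterized by queue depth: full pages holding SEC_PBUF_NUM packages each, plus a partial tail. Plugging in illustrative constants (512-byte pbuf, 16-byte IV, 64-byte MAC) shows the shape of the result:

	#include <stdio.h>

	#define PAGE_SIZE		4096
	#define SEC_PBUF_PKG		(512 + 16 + 64 * 2)	/* pbuf + IV + 2 MACs */
	#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
	#define SEC_PBUF_PAGE_NUM(d)	((d) / SEC_PBUF_NUM)
	#define SEC_PBUF_LEFT_SZ(d)	(SEC_PBUF_PKG * ((d) - \
					 SEC_PBUF_PAGE_NUM(d) * SEC_PBUF_NUM))
	#define SEC_TOTAL_PBUF_SZ(d)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(d) + \
					 SEC_PBUF_LEFT_SZ(d))

	int main(void)
	{
		/* With 656-byte packages, 6 fit per page; a 1024-deep queue
		 * needs 170 full pages plus a tail of 4 packages. */
		printf("pages=%d left=%d total=%d\n", SEC_PBUF_PAGE_NUM(1024),
		       SEC_PBUF_LEFT_SZ(1024), SEC_TOTAL_PBUF_SZ(1024));
		return 0;
	}
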
@@ -104,6 +104,16 @@
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+struct sec_skcipher {
+ u64 alg_msk;
+ struct skcipher_alg alg;
+};
+
+struct sec_aead {
+ u64 alg_msk;
+ struct aead_alg alg;
+};
+
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -128,9 +138,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
int req_id;
spin_lock_bh(&qp_ctx->req_lock);
-
- req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
- 0, QM_Q_DEPTH, GFP_ATOMIC);
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
dev_err(req->ctx->dev, "alloc req id fail!\n");
@@ -148,7 +156,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
@@ -300,14 +308,15 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->c_ivin_dma, GFP_KERNEL);
if (!res->c_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
}
@@ -318,20 +327,21 @@ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->c_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->c_ivin, res->c_ivin_dma);
}
static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->a_ivin_dma, GFP_KERNEL);
if (!res->a_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
}
@@ -342,20 +352,21 @@ static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->a_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->a_ivin, res->a_ivin_dma);
}
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
&res->out_mac_dma, GFP_KERNEL);
if (!res->out_mac)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].out_mac_dma = res->out_mac_dma +
i * (SEC_MAX_MAC_LEN << 1);
res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
@@ -367,14 +378,14 @@ static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->out_mac)
- dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
res->out_mac, res->out_mac_dma);
}
static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->pbuf)
- dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
res->pbuf, res->pbuf_dma);
}
@@ -384,10 +395,12 @@ static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
*/
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
+ int size = SEC_PBUF_PAGE_NUM(q_depth);
int pbuf_page_offset;
int i, j, k;
- res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
&res->pbuf_dma, GFP_KERNEL);
if (!res->pbuf)
return -ENOMEM;
@@ -400,11 +413,11 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
* So we need SEC_PBUF_PAGE_NUM numbers of PAGE
* for the SEC_TOTAL_PBUF_SZ
*/
- for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ for (i = 0; i <= size; i++) {
pbuf_page_offset = PAGE_SIZE * i;
for (j = 0; j < SEC_PBUF_NUM; j++) {
k = i * SEC_PBUF_NUM + j;
- if (k == QM_Q_DEPTH)
+ if (k == q_depth)
break;
res[k].pbuf = res->pbuf +
j * SEC_PBUF_PKG + pbuf_page_offset;
@@ -470,36 +483,29 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
sec_free_mac_resource(dev, qp_ctx->res);
}
-static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
- int qp_ctx_id, int alg_type)
+static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
+ u16 q_depth = qp_ctx->qp->sq_depth;
struct device *dev = ctx->dev;
- struct sec_qp_ctx *qp_ctx;
- struct hisi_qp *qp;
int ret = -ENOMEM;
- qp_ctx = &ctx->qp_ctx[qp_ctx_id];
- qp = ctx->qps[qp_ctx_id];
- qp->req_type = 0;
- qp->qp_ctx = qp_ctx;
- qp_ctx->qp = qp;
- qp_ctx->ctx = ctx;
-
- qp->req_cb = sec_req_cb;
+ qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
+ if (!qp_ctx->req_list)
+ return ret;
- spin_lock_init(&qp_ctx->req_lock);
- idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
+ if (!qp_ctx->res)
+ goto err_free_req_list;
+ qp_ctx->res->depth = q_depth;
- qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_destroy_idr;
+ goto err_free_res;
}
- qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
@@ -509,34 +515,72 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
if (ret)
goto err_free_c_out_pool;
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0)
- goto err_queue_free;
-
return 0;
-err_queue_free:
- sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_destroy_idr:
- idr_destroy(&qp_ctx->req_idr);
+err_free_res:
+ kfree(qp_ctx->res);
+err_free_req_list:
+ kfree(qp_ctx->req_list);
return ret;
}
-static void sec_release_qp_ctx(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
+static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
struct device *dev = ctx->dev;
- hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
-
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+ kfree(qp_ctx->res);
+ kfree(qp_ctx->req_list);
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+ int qp_ctx_id, int alg_type)
+{
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret;
+ qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
+ qp->req_type = 0;
+ qp->qp_ctx = qp_ctx;
+ qp_ctx->qp = qp;
+ qp_ctx->ctx = ctx;
+
+ qp->req_cb = sec_req_cb;
+
+ spin_lock_init(&qp_ctx->req_lock);
+ idr_init(&qp_ctx->req_idr);
+ INIT_LIST_HEAD(&qp_ctx->backlog);
+
+ ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
+ if (ret)
+ goto err_destroy_idr;
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0)
+ goto err_resource_free;
+
+ return 0;
+
+err_resource_free:
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
+ return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ hisi_qm_stop_qp(qp_ctx->qp);
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
idr_destroy(&qp_ctx->req_idr);
}
@@ -559,7 +603,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->pbuf_supported = ctx->sec->iommu_used;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -1679,7 +1723,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
aead_req->out_mac,
authsize, a_req->cryptlen +
a_req->assoclen);
-
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1966,7 +2009,6 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-
static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
struct sec_req *sreq)
{
@@ -2126,67 +2168,80 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.min_keysize = sec_min_key_size,\
.max_keysize = sec_max_key_size,\
.ivsize = iv_size,\
-},
+}
#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_skciphers[] = {
- SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-};
-
-static struct skcipher_alg sec_skciphers_v3[] = {
- SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+static struct sec_skcipher sec_skciphers[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(1),
+ .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(2),
+ .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(3),
+ .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(4),
+ .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(5),
+ .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(12),
+ .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(13),
+ .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(14),
+ .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(15),
+ .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(16),
+ .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(23),
+ .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(24),
+ .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
+ DES3_EDE_BLOCK_SIZE),
+ },
};
static int aead_iv_demension_check(struct aead_request *aead_req)
@@ -2380,90 +2435,135 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.maxauthsize = max_authsize,\
}
-static struct aead_alg sec_aeads[] = {
- SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
- sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+static struct sec_aead sec_aeads[] = {
+ {
+ .alg_msk = BIT(6),
+ .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(7),
+ .alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(17),
+ .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(18),
+ .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(43),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
+ sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(44),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
+ sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(45),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
+ sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ },
+};
- SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
- sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+static void sec_unregister_skcipher(u64 alg_mask, int end)
+{
+ int i;
- SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
- sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ for (i = 0; i < end; i++)
+ if (sec_skciphers[i].alg_msk & alg_mask)
+ crypto_unregister_skcipher(&sec_skciphers[i].alg);
+}
- SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+static int sec_register_skcipher(u64 alg_mask)
+{
+ int i, ret, count;
- SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ count = ARRAY_SIZE(sec_skciphers);
-static struct aead_alg sec_aeads_v3[] = {
- SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ for (i = 0; i < count; i++) {
+ if (!(sec_skciphers[i].alg_msk & alg_mask))
+ continue;
- SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ ret = crypto_register_skcipher(&sec_skciphers[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_skcipher(alg_mask, i);
+
+ return ret;
+}
+
+static void sec_unregister_aead(u64 alg_mask, int end)
+{
+ int i;
+
+ for (i = 0; i < end; i++)
+ if (sec_aeads[i].alg_msk & alg_mask)
+ crypto_unregister_aead(&sec_aeads[i].alg);
+}
+
+static int sec_register_aead(u64 alg_mask)
+{
+ int i, ret, count;
+
+ count = ARRAY_SIZE(sec_aeads);
+
+ for (i = 0; i < count; i++) {
+ if (!(sec_aeads[i].alg_msk & alg_mask))
+ continue;
+
+ ret = crypto_register_aead(&sec_aeads[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_aead(alg_mask, i);
+
+ return ret;
+}
int sec_register_to_crypto(struct hisi_qm *qm)
{
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
int ret;
- /* To avoid repeat register */
- ret = crypto_register_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ ret = sec_register_skcipher(alg_mask);
if (ret)
return ret;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- if (ret)
- goto reg_skcipher_fail;
- }
-
- ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ ret = sec_register_aead(alg_mask);
if (ret)
- goto reg_aead_fail;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
- if (ret)
- goto reg_aead_v3_fail;
- }
- return ret;
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
-reg_aead_v3_fail:
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
-reg_aead_fail:
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
-reg_skcipher_fail:
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
- if (qm->ver > QM_HW_V2)
- crypto_unregister_aeads(sec_aeads_v3,
- ARRAY_SIZE(sec_aeads_v3));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
}
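sec_register_skcipher() and sec_register_aead() above share one rollback idiom: on a failed registration the matching unregister helper is called with the loop index, so only the entries already registered (and matching the mask) are unwound. A compilable sketch of that idiom with stand-in register/unregister functions:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for crypto_register_skcipher()/crypto_unregister_skcipher();
     * entry 2 is made to fail so the unwind path runs. */
    static int fake_register(int i) { return i == 2 ? -1 : 0; }
    static void fake_unregister(int i) { printf("rolled back entry %d\n", i); }

    static int register_masked(const uint64_t *msks, int count, uint64_t hw_mask)
    {
            int i, ret;

            for (i = 0; i < count; i++) {
                    if (!(msks[i] & hw_mask))
                            continue;
                    ret = fake_register(i);
                    if (ret)
                            goto err;
            }
            return 0;
    err:
            while (--i >= 0)        /* entries [0, i) only, same mask filter */
                    if (msks[i] & hw_mask)
                            fake_unregister(i);
            return ret;
    }

    int main(void)
    {
            uint64_t msks[] = { 1ULL << 0, 1ULL << 1, 1ULL << 2, 1ULL << 3 };

            return register_masked(msks, 4, ~0ULL) ? 1 : 0;
    }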
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2c0be91c0b09..3705412bac5f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -27,7 +27,6 @@
#define SEC_BD_ERR_CHK_EN3 0xffffbfff
#define SEC_SQE_SIZE 128
-#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 256
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 2
@@ -42,16 +41,11 @@
#define SEC_ECC_NUM 16
#define SEC_ECC_MASH 0xFF
#define SEC_CORE_INT_DISABLE 0x0
-#define SEC_CORE_INT_ENABLE 0x7c1ff
-#define SEC_CORE_INT_CLEAR 0x7c1ff
-#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
#define SEC_RAS_NFE_REG 0x301058
-#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
-#define SEC_RAS_NFE_ENB_MSK 0x7c177
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x301100
@@ -119,6 +113,16 @@
#define SEC_DFX_COMMON1_LEN 0x45
#define SEC_DFX_COMMON2_LEN 0xBA
+#define SEC_ALG_BITMAP_SHIFT 32
+
+#define SEC_CIPHER_BITMAP (GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
+ GENMASK_ULL(24, 21))
+#define SEC_DIGEST_BITMAP (GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
+ GENMASK_ULL(42, 25))
+#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
+ GENMASK_ULL(45, 43))
+#define SEC_DEV_ALG_MAX_LEN 256
+
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -129,6 +133,11 @@ struct sec_dfx_item {
u32 offset;
};
+struct sec_dev_alg {
+ u64 alg_msk;
+ const char *algs;
+};
+
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
@@ -137,6 +146,46 @@ static struct hisi_qm_list sec_devices = {
.unregister_from_crypto = sec_unregister_from_crypto,
};
+static const struct hisi_qm_cap_info sec_basic_info[] = {
+ {SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
+ {SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
+ {SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
+ {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
+ {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
+ {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
+ {SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+ {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+ {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+};
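Judging by how the values are consumed later in the file, each sec_basic_info row appears to encode {capability id, register offset, field shift, field mask, and three per-generation defaults}; that reading is inferred from usage, not taken from the qm core source. A compilable sketch of a lookup in that shape:

    #include <stdint.h>

    struct cap_info {
            uint32_t offset;        /* capability register offset */
            uint32_t shift;         /* field position inside the register */
            uint32_t mask;          /* field width */
            uint32_t v1, v2, v3;    /* fallback defaults per hardware generation */
    };

    static uint32_t mmio_read32(uint32_t offset)    /* stub standing in for readl() */
    {
            (void)offset;
            return 0;
    }

    /* Read the register when the device exposes capability registers,
     * otherwise fall back to the per-version default. */
    static uint32_t get_hw_info(const struct cap_info *c, int hw_ver, int has_cap_regs)
    {
            if (has_cap_regs)
                    return (mmio_read32(c->offset) >> c->shift) & c->mask;
            return hw_ver >= 3 ? c->v3 : hw_ver == 2 ? c->v2 : c->v1;
    }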
+
+static const struct sec_dev_alg sec_dev_algs[] = { {
+ .alg_msk = SEC_CIPHER_BITMAP,
+ .algs = "cipher\n",
+ }, {
+ .alg_msk = SEC_DIGEST_BITMAP,
+ .algs = "digest\n",
+ }, {
+ .alg_msk = SEC_AEAD_BITMAP,
+ .algs = "aead\n",
+ },
+};
+
static const struct sec_hw_error sec_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -339,6 +388,16 @@ struct hisi_qp **sec_create_qps(void)
return NULL;
}
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+{
+ u32 cap_val_h, cap_val_l;
+
+ cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+ cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
+
+ return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+}
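sec_get_alg_bitmap() splices two 32-bit capability words into one 64-bit mask. With the V3 column of sec_basic_info above (high word 0x395C, low word 0x187F0FF) the result is 0x395C0187F0FF, where bit 45, for instance, matches the authenc(hmac(sha512),cbc(aes)) entry in sec_aeads. A quick sanity check of the splice:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t hi = 0x395C, lo = 0x187F0FF;
            uint64_t mask = (hi << 32) | lo;

            assert(mask == 0x395C0187F0FFULL);
            assert(mask & (1ULL << 45));    /* authenc(hmac(sha512),cbc(aes)) */
            return 0;
    }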
+
static const struct kernel_param_ops sec_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
@@ -415,7 +474,7 @@ static void sec_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -435,7 +494,7 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
@@ -506,7 +565,8 @@ static int sec_engine_init(struct hisi_qm *qm)
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
+ reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
+ writel(reg, qm->io_base + SEC_SAA_EN_REG);
if (qm->ver < QM_HW_V3) {
/* HW V2 enable sm4 extra mode, as ctr/ecb */
@@ -576,7 +636,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = SEC_RAS_NFE_ENB_MSK;
+ val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -590,25 +651,30 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
+ ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+
/* clear SEC hw error source if having */
- writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(ce, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -939,7 +1005,11 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -955,14 +1025,20 @@ static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = SEC_RAS_FE_ENB_MSK;
+ err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
}
static const struct hisi_qm_err_ini sec_err_ini = {
@@ -1001,11 +1077,41 @@ static int sec_pf_probe_init(struct sec_dev *sec)
return ret;
}
+static int sec_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u64 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
+
+ for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
+ if (alg_mask & sec_dev_algs[i].alg_msk)
+ strcat(algs, sec_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
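sec_set_qm_algs() builds the uacce algs string by concatenating a newline-terminated name for each class present in the device bitmap, then trimming the final newline. The same logic in standalone form; the masks below are made up for the demo rather than the real SEC_*_BITMAP values:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct dev_alg { uint64_t msk; const char *name; };

    int main(void)
    {
            static const struct dev_alg tbl[] = {
                    { 0x3, "cipher\n" }, { 0xC, "digest\n" }, { 0x30, "aead\n" },
            };
            uint64_t dev_mask = 0x3 | 0x30;         /* cipher + aead, no digest */
            char algs[64] = "";
            char *nl;

            for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    if (dev_mask & tbl[i].msk)
                            strcat(algs, tbl[i].name);

            nl = strrchr(algs, '\n');
            if (nl)
                    *nl = '\0';
            puts(algs);     /* prints "cipher" and "aead" on separate lines */
            return 0;
    }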
+
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "cipher\ndigest\naead";
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -1028,7 +1134,19 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init sec qm configuration!\n");
+ return ret;
+ }
+
+ ret = sec_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set sec algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void sec_qm_uninit(struct hisi_qm *qm)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 3dfd3bac5a33..f2e6da3240ae 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,7 +81,8 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index ad35434a3fdb..6608971d10cd 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -39,6 +39,9 @@
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
+#define HZIP_ALG_ZLIB GENMASK(1, 0)
+#define HZIP_ALG_GZIP GENMASK(3, 2)
+
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
@@ -123,19 +126,19 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
return -EINVAL;
- return param_set_int(val, kp);
+ return param_set_ushort(val, kp);
}
static const struct kernel_param_ops sgl_sge_nr_ops = {
.set = sgl_sge_nr_set,
- .get = param_get_int,
+ .get = param_get_ushort,
};
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static u16 get_extra_field_size(const u8 *start)
+static u32 get_extra_field_size(const u8 *start)
{
return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
}
@@ -167,7 +170,7 @@ static u32 __get_gzip_head_size(const u8 *src)
return size;
}
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
char buf[HZIP_GZIP_HEAD_BUF];
@@ -183,7 +186,7 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
int ret;
ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size) {
+ if (unlikely(ret != head_size)) {
pr_err("the head size of buffer is wrong (%d)!\n", ret);
return -ENOMEM;
}
@@ -193,11 +196,11 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
- if (!acomp_req->src || !acomp_req->slen)
+ if (unlikely(!acomp_req->src || !acomp_req->slen))
return -EINVAL;
- if (req_type == HZIP_ALG_TYPE_GZIP &&
- acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+ if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
+ acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
return -EINVAL;
switch (req_type) {
@@ -230,6 +233,8 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
}
set_bit(req_id, req_q->req_bitmap);
+ write_unlock(&req_q->req_lock);
+
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
@@ -242,8 +247,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache->dskip = 0;
}
- write_unlock(&req_q->req_lock);
-
return req_cache;
}
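The two hunks above narrow the write-locked region of hisi_zip_create_req(): only the bitmap search and set_bit() need the lock, and once a bit is set the slot belongs to this caller, so the per-request field initialisation can run unlocked. A userspace sketch of the narrowed critical section:

    #include <pthread.h>
    #include <stdint.h>

    #define Q_SIZE 64

    struct req_q {
            pthread_rwlock_t lock;
            uint64_t bitmap;
            int slot_data[Q_SIZE];
    };

    /* Returns a reserved slot id, or -1 if full. The lock covers only the
     * bitmap update; a set bit already makes the slot private to the caller. */
    static int acquire_slot(struct req_q *q)
    {
            int id = -1;

            pthread_rwlock_wrlock(&q->lock);
            for (int i = 0; i < Q_SIZE; i++)
                    if (!(q->bitmap & (1ULL << i))) {
                            q->bitmap |= 1ULL << i;
                            id = i;
                            break;
                    }
            pthread_rwlock_unlock(&q->lock);

            if (id >= 0)
                    q->slot_data[id] = id;  /* init outside the critical section */
            return id;
    }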
@@ -254,7 +257,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
- memset(req, 0, sizeof(struct hisi_zip_req));
write_unlock(&req_q->req_lock);
}
@@ -339,7 +341,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_sqe zip_sqe;
int ret;
- if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+ if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
@@ -365,7 +367,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
ret = hisi_qp_send(qp, &zip_sqe);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
dev_dbg_ratelimited(dev, "failed to send request!\n");
@@ -417,7 +419,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
atomic64_inc(&dfx->recv_cnt);
status = ops->get_status(sqe);
- if (status != 0 && status != HZIP_NC_ERR) {
+ if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
@@ -450,7 +452,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
/* let's output compression head now */
head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
head_size);
return head_size;
@@ -461,7 +463,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
hisi_zip_remove_req(qp_ctx, req);
}
@@ -478,7 +480,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
int head_size, ret;
head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
head_size);
return head_size;
@@ -489,7 +491,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
ret);
hisi_zip_remove_req(qp_ctx, req);
@@ -498,7 +500,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return ret;
}
-static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
int alg_type, int req_type)
{
struct device *dev = &qp->qm->pdev->dev;
@@ -506,7 +508,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
qp->req_type = req_type;
qp->alg_type = alg_type;
- qp->qp_ctx = ctx;
+ qp->qp_ctx = qp_ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
@@ -514,15 +516,15 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
return ret;
}
- ctx->qp = qp;
+ qp_ctx->qp = qp;
return 0;
}
-static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
+static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
- hisi_qm_stop_qp(ctx->qp);
- hisi_qm_free_qps(&ctx->qp, 1);
+ hisi_qm_stop_qp(qp_ctx->qp);
+ hisi_qm_free_qps(&qp_ctx->qp, 1);
}
static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
@@ -594,18 +596,19 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
int i;
- for (i = 1; i >= 0; i--)
+ for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_req_q *req_q;
int i, ret;
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
req_q = &ctx->qp_ctx[i].req_q;
- req_q->size = QM_Q_DEPTH;
+ req_q->size = q_depth;
req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
if (!req_q->req_bitmap) {
@@ -613,7 +616,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (i == 0)
return ret;
- goto err_free_loop0;
+ goto err_free_comp_q;
}
rwlock_init(&req_q->req_lock);
@@ -622,19 +625,19 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (!req_q->q) {
ret = -ENOMEM;
if (i == 0)
- goto err_free_bitmap;
+ goto err_free_comp_bitmap;
else
- goto err_free_loop1;
+ goto err_free_decomp_bitmap;
}
}
return 0;
-err_free_loop1:
+err_free_decomp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
-err_free_loop0:
+err_free_comp_q:
kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
-err_free_bitmap:
+err_free_comp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
return ret;
}
@@ -651,6 +654,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_qp_ctx *tmp;
struct device *dev;
int i;
@@ -658,7 +662,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
tmp = &ctx->qp_ctx[i];
dev = &tmp->qp->qm->pdev->dev;
- tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+ tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
sgl_sge_nr);
if (IS_ERR(tmp->sgl_pool)) {
if (i == 1)
@@ -755,6 +759,28 @@ static struct acomp_alg hisi_zip_acomp_zlib = {
}
};
+static int hisi_zip_register_zlib(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return 0;
+
+ ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
+{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return;
+
+ crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
static struct acomp_alg hisi_zip_acomp_gzip = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
@@ -769,27 +795,45 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+static int hisi_zip_register_gzip(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
- if (ret) {
- pr_err("failed to register to zlib (%d)!\n", ret);
- return ret;
- }
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return 0;
ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
- if (ret) {
- pr_err("failed to register to gzip (%d)!\n", ret);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
- }
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret);
return ret;
}
-void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return;
+
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+{
+ int ret = 0;
+
+ ret = hisi_zip_register_zlib(qm);
+ if (ret)
+ return ret;
+
+ ret = hisi_zip_register_gzip(qm);
+ if (ret)
+ hisi_zip_unregister_zlib(qm);
+
+ return ret;
+}
+
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+{
+ hisi_zip_unregister_zlib(qm);
+ hisi_zip_unregister_gzip(qm);
}
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c3303d99acac..c863435e8c75 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -20,18 +20,6 @@
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_CLOCK_GATE_CTRL 0x301004
-#define COMP0_ENABLE BIT(0)
-#define COMP1_ENABLE BIT(1)
-#define DECOMP0_ENABLE BIT(2)
-#define DECOMP1_ENABLE BIT(3)
-#define DECOMP2_ENABLE BIT(4)
-#define DECOMP3_ENABLE BIT(5)
-#define DECOMP4_ENABLE BIT(6)
-#define DECOMP5_ENABLE BIT(7)
-#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
- DECOMP0_ENABLE | DECOMP1_ENABLE | \
- DECOMP2_ENABLE | DECOMP3_ENABLE | \
- DECOMP4_ENABLE | DECOMP5_ENABLE)
#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008
@@ -69,20 +57,14 @@
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
-#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0
#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
-#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
-#define HZIP_COMP_CORE_NUM 2
-#define HZIP_DECOMP_CORE_NUM 6
-#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
- HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE 128
-#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -92,6 +74,12 @@
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)
+#define HZIP_DEV_ALG_MAX_LEN 256
+#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
+#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
+#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
+#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48
@@ -132,6 +120,26 @@ struct zip_dfx_item {
u32 offset;
};
+struct zip_dev_alg {
+ u32 alg_msk;
+ const char *algs;
+};
+
+static const struct zip_dev_alg zip_dev_algs[] = { {
+ .alg_msk = HZIP_ALG_ZLIB_BIT,
+ .algs = "zlib\n",
+ }, {
+ .alg_msk = HZIP_ALG_GZIP_BIT,
+ .algs = "gzip\n",
+ }, {
+ .alg_msk = HZIP_ALG_DEFLATE_BIT,
+ .algs = "deflate\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .algs = "lz77_zstd\n",
+ },
+};
+
static struct hisi_qm_list zip_devices = {
.register_to_crypto = hisi_zip_register_to_crypto,
.unregister_from_crypto = hisi_zip_unregister_from_crypto,
@@ -187,6 +195,58 @@ struct hisi_zip_ctrl {
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
+enum zip_cap_type {
+ ZIP_QM_NFE_MASK_CAP = 0x0,
+ ZIP_QM_RESET_MASK_CAP,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_QM_CE_MASK_CAP,
+ ZIP_NFE_MASK_CAP,
+ ZIP_RESET_MASK_CAP,
+ ZIP_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_CE_MASK_CAP,
+ ZIP_CLUSTER_NUM_CAP,
+ ZIP_CORE_TYPE_NUM_CAP,
+ ZIP_CORE_NUM_CAP,
+ ZIP_CLUSTER_COMP_NUM_CAP,
+ ZIP_CLUSTER_DECOMP_NUM_CAP,
+ ZIP_DECOMP_ENABLE_BITMAP,
+ ZIP_COMP_ENABLE_BITMAP,
+ ZIP_DRV_ALG_BITMAP,
+ ZIP_DEV_ALG_BITMAP,
+ ZIP_CORE1_ALG_BITMAP,
+ ZIP_CORE2_ALG_BITMAP,
+ ZIP_CORE3_ALG_BITMAP,
+ ZIP_CORE4_ALG_BITMAP,
+ ZIP_CORE5_ALG_BITMAP,
+ ZIP_CAP_MAX
+};
+
+static struct hisi_qm_cap_info zip_basic_cap_info[] = {
+ {ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
+ {ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
+ {ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
+ {ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
+ {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
+ {ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
+ {ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
+ {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
+ {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
+ {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
+ {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF},
+ {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF},
+ {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
+};
+
enum {
HZIP_COMP_CORE0,
HZIP_COMP_CORE1,
@@ -343,12 +403,52 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
+ if ((alg & cap_val) == alg)
+ return true;
+
+ return false;
+}
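hisi_zip_alg_support() is a subset test rather than a plain AND: each algorithm occupies a two-bit field (HZIP_ALG_ZLIB is GENMASK(1, 0)), so (alg & cap_val) == alg passes only when every bit of the field is present. A quick check:

    #include <assert.h>
    #include <stdint.h>

    static int supports(uint32_t cap, uint32_t alg)
    {
            return (alg & cap) == alg;      /* all bits of alg must be set in cap */
    }

    int main(void)
    {
            uint32_t zlib = 0x3;            /* GENMASK(1, 0) */

            assert(supports(0xF, zlib));    /* both bits present */
            assert(!supports(0x1, zlib));   /* one bit missing: unsupported */
            return 0;
    }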
+
+static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
+ if (alg_mask & zip_dev_algs[i].alg_msk)
+ strcat(algs, zip_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -368,7 +468,7 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
@@ -401,6 +501,7 @@ static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
+ u32 dcomp_bm, comp_bm;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -438,8 +539,11 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
- base + HZIP_CLOCK_GATE_CTRL);
+ dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
+ comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
+ writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
@@ -458,7 +562,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -472,6 +577,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -479,17 +586,17 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+
/* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(HZIP_CORE_INT_RAS_CE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
@@ -498,10 +605,13 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
/* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
- /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, false);
}
@@ -586,8 +696,9 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
return len;
tbuf[len] = '\0';
- if (kstrtoul(tbuf, 0, &val))
- return -EFAULT;
+ ret = kstrtoul(tbuf, 0, &val);
+ if (ret)
+ return ret;
ret = hisi_qm_get_dfx_access(qm);
if (ret)
@@ -651,18 +762,23 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
+ u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "comp_core%d", i);
else
scnprintf(buf, sizeof(buf), "decomp_core%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -675,7 +791,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
debugfs_create_file("regs", 0444, tmp_d, regset,
- &hisi_zip_regs_fops);
+ &hisi_zip_regs_fops);
}
return 0;
@@ -795,10 +911,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_num;
int i, j, idx;
- debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM +
- com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+
+ debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
+ sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
return -ENOMEM;
@@ -807,7 +926,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
debug->last_words[i] = readl_relaxed(io_base);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
+ for (i = 0; i < zip_core_num; i++) {
io_base = qm->io_base + core_offsets[i];
for (j = 0; j < core_dfx_regs_num; j++) {
idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
@@ -834,6 +953,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
+ u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
void __iomem *base;
@@ -847,15 +967,18 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
if (debug->last_words[i] != val)
pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
- hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+ hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
else
scnprintf(buf, sizeof(buf), "Decomp_core-%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
base = qm->io_base + core_offsets[i];
pci_info(qm->pdev, "==>%s:\n", buf);
@@ -865,7 +988,8 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
if (debug->last_words[idx] != val)
pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
- hzip_dump_dfx_regs[j].name, debug->last_words[idx], val);
+ hzip_dump_dfx_regs[j].name,
+ debug->last_words[idx], val);
}
}
}
@@ -900,7 +1024,11 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -934,16 +1062,21 @@ static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
-
- if (qm->ver >= QM_HW_V3)
- err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
@@ -976,7 +1109,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
qm->err_ini = &hisi_zip_err_ini;
qm->err_ini->err_info_init(qm);
- hisi_zip_set_user_domain_and_cache(qm);
+ ret = hisi_zip_set_user_domain_and_cache(qm);
+ if (ret)
+ return ret;
+
hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -990,12 +1126,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
- else
- qm->algs = "zlib\ngzip";
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1019,7 +1153,19 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init zip qm configuration!\n");
+ return ret;
+ }
+
+ ret = hisi_zip_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set zip algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void hisi_zip_qm_uninit(struct hisi_qm *qm)
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d68ef16650d4..32a37e3850c5 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -63,7 +63,6 @@ struct safexcel_cipher_ctx {
u32 hash_alg;
u32 state_sz;
- struct crypto_cipher *hkaes;
struct crypto_aead *fback;
};
@@ -642,10 +641,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
/*
@@ -737,23 +742,29 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
max(totlen_src, totlen_dst));
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
totlen_src);
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
totlen_dst);
- dma_unmap_sg(priv->dev, src, sreq->nr_src,
- DMA_TO_DEVICE);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unmap;
}
- dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+
+ if (sreq->nr_dst > 0)
+ dma_map_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
@@ -883,12 +894,18 @@ rdesc_rollback:
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-
+unmap:
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
return ret;
@@ -2589,15 +2606,8 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ctx->key_len = len;
/* Compute hash key by encrypting zeroes with cipher key */
- crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- if (ret)
- return ret;
-
memset(hashkey, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey);
+ aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey);
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
@@ -2626,15 +2636,11 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->xcm = EIP197_XCM_MODE_GCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
- ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->hkaes);
+ return 0;
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(ctx->hkaes);
safexcel_aead_cra_exit(tfm);
}
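The GCM setkey change above drops the crypto_cipher handle and derives the hash key directly as H = AES_K(0^128) with the AES library. A sketch of just that derivation using the <crypto/aes.h> helpers; the function name and the assumption that the key length was already validated are mine, not the driver's:

    #include <crypto/aes.h>
    #include <linux/string.h>

    /* Derive the GCM hash key H by encrypting a zero block with the cipher key. */
    static int derive_ghash_key(const u8 *key, unsigned int keylen,
                                u8 hashkey[AES_BLOCK_SIZE])
    {
            struct crypto_aes_ctx aes;
            int ret;

            ret = aes_expandkey(&aes, key, keylen);
            if (ret)
                    return ret;

            memset(hashkey, 0, AES_BLOCK_SIZE);
            aes_encrypt(&aes, hashkey, hashkey);
            memzero_explicit(&aes, sizeof(aes));    /* don't leave key schedule on stack */
            return 0;
    }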
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index bc60b5802256..103fc551d2af 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -30,7 +30,7 @@ struct safexcel_ahash_ctx {
bool fb_init_done;
bool fb_do_setkey;
- struct crypto_cipher *kaes;
+ struct crypto_aes_ctx *aes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
struct shash_desc *shdesc;
@@ -383,7 +383,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
u32 x;
x = ipad[i] ^ ipad[i + 4];
- cache[i] ^= swab(x);
+ cache[i] ^= swab32(x);
}
}
cache_len = AES_BLOCK_SIZE;
@@ -821,10 +821,10 @@ static int safexcel_ahash_final(struct ahash_request *areq)
u32 *result = (void *)areq->result;
/* K3 */
- result[i] = swab(ctx->base.ipad.word[i + 4]);
+ result[i] = swab32(ctx->base.ipad.word[i + 4]);
}
areq->result[0] ^= 0x80; // 10- padding
- crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+ aes_encrypt(ctx->aes, areq->result, areq->result);
return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
@@ -2083,37 +2083,26 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
/* precompute the XCBC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
-
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
- "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
- "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp,
+ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->base.ipad.word[i] = swab(key_tmp[i]);
-
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes,
- (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- AES_MIN_KEY_SIZE);
+ ctx->base.ipad.word[i] = swab32(key_tmp[i]);
+
+ ret = aes_expandkey(ctx->aes,
+ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ AES_MIN_KEY_SIZE);
if (ret)
return ret;
@@ -2121,7 +2110,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
@@ -2130,15 +2118,15 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_ahash_cra_init(tfm);
- ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->kaes);
+ ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
+ return ctx->aes ? 0 : -ENOMEM; /* kmalloc() returns NULL, not an ERR_PTR, on failure */
}
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->kaes);
+ kfree(ctx->aes);
safexcel_ahash_cra_exit(tfm);
}
@@ -2178,31 +2166,23 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
__be64 consts[4];
u64 _const[2];
u8 msb_mask, gfmask;
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ /* precompute the CMAC key material */
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
for (i = 0; i < len / sizeof(u32); i++)
- ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
-
- /* precompute the CMAC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
+ ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]);
/* code below borrowed from crypto/cmac.c */
/* encrypt the zero block */
memset(consts, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+ aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts);
gfmask = 0x87;
_const[0] = be64_to_cpu(consts[1]);
@@ -2234,7 +2214,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
}
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
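The gfmask value 0x87 in safexcel_cmac_setkey() is the GF(2^128) reduction constant used by CMAC subkey generation: each subkey is the previous value shifted left one bit, with 0x87 folded into the last byte when the shifted-out bit was set. A standalone version of one doubling step, mirroring the math in crypto/cmac.c rather than copying it:

    #include <stdint.h>

    /* out = in << 1 over GF(2^128); in/out are 16-byte big-endian blocks. */
    static void gf128_double(uint8_t out[16], const uint8_t in[16])
    {
            uint8_t carry = 0;

            for (int i = 15; i >= 0; i--) {
                    uint8_t next_carry = in[i] >> 7;

                    out[i] = (uint8_t)((in[i] << 1) | carry);
                    carry = next_carry;
            }
            if (carry)
                    out[15] ^= 0x87;        /* reduce modulo x^128 + x^7 + x^2 + x + 1 */
    }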
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 7942b48dd55a..1cd62f9c3e3a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
config CRYPTO_DEV_KEEMBAY_OCS_ECC
tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select CRYPTO_ECDH
select CRYPTO_ENGINE
@@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU
select CRYPTO_ENGINE
depends on HAS_IOMEM
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
Control Unit (HCU) hardware acceleration for use with Crypto API.
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
index b8bdb9f134f3..205eacac4a34 100644
--- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -403,7 +403,7 @@ union otx_cptx_pf_exe_bist_status {
* big-endian format in memory.
* iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
* 0 = The hardware issues NCB transient load (LDT) towards the cache,
- * which if the line hits and is is dirty will cause the line to be
+ * which if the line hits and is dirty will cause the line to be
* written back before being replaced.
* 1 = The hardware issues NCB LDWB read-and-invalidate command towards
* the cache when fetching the last word of instructions; as a result the
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 40b482198ebc..df9c2b8747e6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -97,7 +97,7 @@ static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
static void set_ucode_filename(struct otx_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -138,7 +138,7 @@ static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
u32 i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
@@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev,
struct tar_ucode_info_t *tar_info;
struct otx_cpt_ucode_hdr *ucode_hdr;
int ucode_type, ucode_size;
+ unsigned int code_length;
/*
* If size is less than microcode header size then don't report
@@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev,
if (get_ucode_type(ucode_hdr, &ucode_type))
return 0;
- ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Invalid code_length %u\n", code_length);
+ return -EINVAL;
+ }
+
+ ucode_size = code_length * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", filename);
@@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
{
struct otx_cpt_ucode_hdr *ucode_hdr;
const struct firmware *fw;
+ unsigned int code_length;
int ret;
set_ucode_filename(ucode, ucode_filename);
@@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
ucode->ver_num = ucode_hdr->ver_num;
- ucode->size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Ucode invalid code_length %u\n", code_length);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ ucode->size = code_length * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
@@ -1328,7 +1342,7 @@ static ssize_t ucode_load_store(struct device *dev,
eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
err_msg = "Invalid engine group format";
- strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
start = tmp_buf;
has_se = has_ie = has_ae = false;
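Both hunks above validate code_length before the `* 2`: ntohl() yields a u32, and doubling it into a signed int can overflow, so values at or above INT_MAX / 2 are rejected first. A minimal demonstration of the guard:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static int ucode_size_from_hdr(uint32_t code_length)
    {
            if (code_length >= INT_MAX / 2) /* code_length * 2 would overflow int */
                    return -1;
            return (int)(code_length * 2);
    }

    int main(void)
    {
            printf("%d\n", ucode_size_from_hdr(100));           /* 200 */
            printf("%d\n", ucode_size_from_hdr(0x7FFFFFFF));    /* -1: rejected */
            return 0;
    }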
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index 36d72e35ebeb..88a41d1ca5f6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -661,7 +661,7 @@ static ssize_t vf_type_show(struct device *dev,
msg = "Invalid";
}
- return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
+ return sysfs_emit(buf, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
@@ -670,7 +670,7 @@ static ssize_t vf_engine_group_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
+ return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
@@ -706,7 +706,7 @@ static ssize_t vf_coalesc_time_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_timewait(cptvf));
}
@@ -716,7 +716,7 @@ static ssize_t vf_coalesc_num_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_numwait(cptvf));
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
index 5663787c7a62..90fdafb7c468 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
@@ -159,12 +159,10 @@ static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_READY;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -174,13 +172,11 @@ int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_QLEN;
mbx.data = cptvf->qsize;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -208,14 +204,12 @@ int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
/* Convey group of the VF */
mbx.data = cptvf->priority;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -224,12 +218,10 @@ int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_UP;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -238,10 +230,8 @@ int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_DOWN;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index f10050fead16..1577986677f6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -68,7 +68,7 @@ static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -126,7 +126,7 @@ static int get_ucode_type(struct device *dev,
int i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 02cb9e44afd8..75c403f2b1d9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -191,7 +191,6 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct mbox_msghdr *req;
- int ret;
req = (struct mbox_msghdr *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
@@ -204,7 +203,5 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
- ret = otx2_cpt_send_mbox_msg(mbox, pdev);
-
- return ret;
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 3b0bf6fea491..31e24df18877 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1494,7 +1494,7 @@ static void n2_unregister_algs(void)
*
* So we have to back-translate, going through the 'intr' and 'ino'
* property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to to figure out which
+ * 'interrupts' property entries, in order to figure out which
* devino goes to which already-translated IRQ.
*/
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 3793885f928d..c843f4c6f684 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -134,7 +134,6 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
unsigned int cryptlen, u8 *b0)
{
unsigned int l, lp, m = authsize;
- int rc;
memcpy(b0, iv, 16);
@@ -148,9 +147,7 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
if (assoclen)
*b0 |= 64;
- rc = set_msg_len(b0 + 16 - l, cryptlen, l);
-
- return rc;
+ return set_msg_len(b0 + 16 - l, cryptlen, l);
}
static int generate_pat(u8 *iv,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index e61b3e13db3b..1931e5b37f2b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -251,13 +251,13 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
return -ENOMEM;
INIT_LIST_HEAD(&key_val->list);
- strlcpy(key_val->key, key, sizeof(key_val->key));
+ strscpy(key_val->key, key, sizeof(key_val->key));
if (type == ADF_DEC) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"%ld", (*((long *)val)));
} else if (type == ADF_STR) {
- strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+ strscpy(key_val->val, (char *)val, sizeof(key_val->val));
} else if (type == ADF_HEX) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"0x%lx", (unsigned long)val);
@@ -315,7 +315,7 @@ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
if (!sec)
return -ENOMEM;
- strlcpy(sec->name, name, sizeof(sec->name));
+ strscpy(sec->name, name, sizeof(sec->name));
INIT_LIST_HEAD(&sec->param_head);
down_write(&cfg->lock);
list_add_tail(&sec->list, &cfg->sec_list);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e8ac932bbaab..82b69e1f725b 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -16,6 +16,9 @@
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
#define DEVICE_NAME "qat_adf_ctl"
static DEFINE_MUTEX(adf_ctl_lock);
@@ -137,10 +140,11 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
struct adf_user_cfg_key_val key_val;
struct adf_user_cfg_key_val *params_head;
struct adf_user_cfg_section section, *section_head;
+ int i, j;
section_head = ctl_data->config_section;
- while (section_head) {
+ for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
if (copy_from_user(&section, (void __user *)section_head,
sizeof(*section_head))) {
dev_err(&GET_DEV(accel_dev),
@@ -156,7 +160,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
params_head = section.params;
- while (params_head) {
+ for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
if (copy_from_user(&key_val, (void __user *)params_head,
sizeof(key_val))) {
dev_err(&GET_DEV(accel_dev),
@@ -363,7 +367,7 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
dev_info.num_logical_accel = hw_data->num_logical_accel;
dev_info.banks_per_accel = hw_data->num_banks
/ hw_data->num_logical_accel;
- strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+ strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
dev_info.instance_id = hw_data->instance_id;
dev_info.type = hw_data->dev_class->type;
dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
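
The two loop changes above put hard caps (ADF_CFG_MAX_SECTION, ADF_CFG_MAX_KEY_VAL) on walks over user-supplied next pointers, so a cyclic or arbitrarily long chain from userspace can no longer stall the ioctl. A userspace analogue of the bounded walk, with hypothetical types:

#include <stddef.h>

#define MAX_NODES 512			/* mirrors ADF_CFG_MAX_SECTION */

struct node {
	struct node *next;		/* untrusted: may form a cycle */
	int payload;
};

static int walk_bounded(const struct node *head)
{
	int i, sum = 0;

	/* Terminate on NULL or after MAX_NODES hops, whichever comes first. */
	for (i = 0; head && i < MAX_NODES; i++) {
		sum += head->payload;
		head = head->next;
	}
	return sum;
}
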
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 43b8f864806b..4fb4b3df5a18 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -107,7 +107,7 @@ do { \
* Timeout is in cycles. Clock speed may vary across products but this
 * value should be a few milliseconds.
*/
-#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
#define ADF_SSMWDTL_OFFSET 0x54
#define ADF_SSMWDTH_OFFSET 0x5C
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e69e5907f595..08bca1c506c0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -96,7 +96,7 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
if (!ring_debug)
return -ENOMEM;
- strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+ strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
snprintf(entry_name, sizeof(entry_name), "ring_%02d",
ring->ring_number);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index 4b36869bf460..69482abdb8b9 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -86,7 +86,8 @@
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
ICP_QAT_CSS_SIGNATURE_LEN(handle))
-#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index fb45fa83841c..cad9c58caab1 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -673,11 +673,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
size_t sz_out = qat_req->buf.sz_out;
+ int bl_dma_dir;
int i;
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_BIDIRECTIONAL);
+ bl->bufers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
@@ -691,7 +694,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -716,6 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n);
int node = dev_to_node(&GET_DEV(inst->accel_dev));
+ int bufl_dma_dir;
if (unlikely(!n))
return -EINVAL;
@@ -733,6 +737,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
qat_req->buf.sgl_src_valid = true;
}
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -744,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err_in;
@@ -787,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err_out;
bufers[y].len = sg->length;
@@ -817,7 +823,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (!qat_req->buf.sgl_dst_valid)
kfree(buflout);
@@ -831,7 +837,7 @@ err_in:
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
if (!qat_req->buf.sgl_src_valid)
kfree(bufl);
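
The direction changes above rely on the fact that an out-of-place request has distinct source and destination buffer lists: the source only needs DMA_TO_DEVICE and the destination only DMA_FROM_DEVICE, leaving DMA_BIDIRECTIONAL for the in-place case. A compact sketch of the selection logic; the helper is illustrative, not a kernel API:

/* Values match the kernel's enum dma_data_direction. */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
};

/* In-place (src == dst) needs a bidirectional mapping; otherwise the
 * source is read-only for the device. */
static enum dma_data_direction src_dir(const void *src, const void *dst)
{
	return src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
}
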
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 095ed2a404d2..94a26702aeae 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,14 +332,14 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->in.dh.in_tab[n_input_params] = 0;
qat_req->out.dh.out_tab[1] = 0;
/* Mapping in.in.b or in.in_g2.xa is the same */
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
- sizeof(qat_req->in.dh.in.b),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+ sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
- sizeof(qat_req->out.dh.r),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+ sizeof(struct qat_dh_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -729,14 +729,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
- sizeof(qat_req->in.rsa.enc.m),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
- sizeof(qat_req->out.rsa.enc.c),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -875,14 +875,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
else
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
- sizeof(qat_req->in.rsa.dec.c),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
- sizeof(qat_req->out.rsa.dec.m),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
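
These hunks map the containing input/output parameter structs with their full size instead of just the first member; the device walks the whole table, so mapping &in.rsa.enc.m with the size of one field under-reports the DMA length. A runnable illustration of the size difference, using a hypothetical stand-in layout:

#include <stdio.h>

struct input_params {			/* hypothetical stand-in layout */
	unsigned long long m;		/* the member the old code mapped */
	unsigned long long e;
	unsigned long long n;
};

int main(void)
{
	struct input_params p;

	/* Old mapping length vs. what the device actually reads. */
	printf("first member: %zu bytes, full table: %zu bytes\n",
	       sizeof(p.m), sizeof(struct input_params));
	return 0;
}
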
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 0fe5a474aa45..b7f7869ef8b2 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -1367,6 +1367,48 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
}
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ unsigned int fw_type)
+{
+ char *fw_type_name = fw_type ? "MMP" : "AE";
+ unsigned int css_dword_size = sizeof(u32);
+
+ if (handle->chip_info->fw_auth) {
+ struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+ unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+ if ((css_hdr->header_len * css_dword_size) != header_len)
+ goto err;
+ if ((css_hdr->size * css_dword_size) != size)
+ goto err;
+ if (fw_type != css_hdr->fw_type)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ size -= header_len;
+ }
+
+ if (fw_type == CSS_AE_FIRMWARE) {
+ if (size < sizeof(struct icp_qat_simg_ae_mode *) +
+ ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+ goto err;
+ if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+ goto err;
+ } else if (fw_type == CSS_MMP_FIRMWARE) {
+ if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+ goto err;
+ } else {
+ pr_err("QAT: Unsupported firmware type\n");
+ return -EINVAL;
+ }
+ return 0;
+
+err:
+ pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ return -EINVAL;
+}
+
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
char *image, unsigned int size,
struct icp_qat_fw_auth_desc **desc)
@@ -1379,7 +1421,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_simg_ae_mode *simg_ae_mode;
struct icp_firml_dram_desc img_desc;
- if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+ if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return -EINVAL;
}
@@ -1547,6 +1589,11 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
{
struct icp_qat_fw_auth_desc *desc = NULL;
int status = 0;
+ int ret;
+
+ ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+ if (ret)
+ return ret;
if (handle->chip_info->fw_auth) {
status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
@@ -2018,8 +2065,15 @@ static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
struct icp_qat_fw_auth_desc *desc = NULL;
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+ int ret;
for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+ ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+ simg_hdr[i].simg_len,
+ CSS_AE_FIRMWARE);
+ if (ret)
+ return ret;
+
if (qat_uclo_map_auth_fw(handle,
(char *)simg_hdr[i].simg_buf,
(unsigned int)
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 97a530171f07..6eb4d2e35629 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -450,8 +450,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req)
if (ret)
return ret;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 59159f5e64e5..37bafd7aeb79 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -97,14 +97,16 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
}
ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return -EIO;
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
- if (ret < 0)
+ if (!ret) {
+ ret = -EIO;
goto error_unmap_src;
+ }
ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
&rctx->result_sg, 1, qce_ahash_done, async_req);
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 3d27cd5210ef..5b493fdc1e74 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -124,15 +124,15 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
rctx->dst_sg = rctx->dst_tbl.sgl;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
if (diff_dst) {
src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (src_nents < 0) {
- ret = src_nents;
+ if (!src_nents) {
+ ret = -EIO;
goto error_unmap_dst;
}
rctx->src_sg = req->src;
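
All three qce hunks fix the same misuse: dma_map_sg() returns the number of mapped entries, or 0 on failure, and never a negative value, so the old < 0 tests were dead code and mapping failures went unnoticed. A minimal sketch of the corrected convention, with stand-in declarations:

struct device;
struct scatterlist;
/* Stand-in with dma_map_sg()-like semantics: nents mapped, 0 on error. */
extern int map_sg(struct device *dev, struct scatterlist *sg, int nents);

#define EIO 5

static int submit(struct device *dev, struct scatterlist *sg, int nents)
{
	int mapped = map_sg(dev, sg, nents);

	if (!mapped)			/* 0 mapped entries == failure */
		return -EIO;

	/* "mapped < 0" can never fire; that is the check being removed. */
	return mapped;
}
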
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 031b5f701a0a..72dd1a4ebac4 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -9,6 +9,7 @@
#include <linux/crypto.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -201,15 +202,13 @@ static int qcom_rng_remove(struct platform_device *pdev)
return 0;
}
-#if IS_ENABLED(CONFIG_ACPI)
-static const struct acpi_device_id qcom_rng_acpi_match[] = {
+static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
{ .id = "QCOM8160", .driver_data = 1 },
{}
};
MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
-#endif
-static const struct of_device_id qcom_rng_of_match[] = {
+static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
{ .compatible = "qcom,prng", .data = (void *)0},
{ .compatible = "qcom,prng-ee", .data = (void *)1},
{}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 457084b344c1..7ab20fb95166 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -26,10 +26,10 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define SHA_BUFFER_LEN PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -196,7 +196,7 @@ struct sahara_dev {
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- struct mutex queue_mutex;
+ spinlock_t queue_spinlock;
struct task_struct *kthread;
struct completion dma_completion;
@@ -487,13 +487,13 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (ret != dev->nb_in_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map in sg\n");
goto unmap_in;
}
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
- if (ret != dev->nb_out_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map out sg\n");
goto unmap_out;
}
@@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
err = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data)
do {
__set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
rctx->first = 1;
}
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
ret = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- mutex_init(&dev->queue_mutex);
+ spin_lock_init(&dev->queue_spinlock);
dev_ptr = dev;
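
The sahara conversion swaps the queue mutex for a spinlock taken with the _bh variants: the queue is touched from the crypto request path and from the kthread, and a non-sleeping lock that also masks local softirqs keeps those short critical sections safe in atomic context. The shape of the pattern, with stand-in declarations:

typedef struct { int locked; } spinlock_t;	/* stand-in type */
extern void spin_lock_init(spinlock_t *l);
extern void spin_lock_bh(spinlock_t *l);	/* also disables local softirqs */
extern void spin_unlock_bh(spinlock_t *l);

struct req_queue {
	spinlock_t lock;
	int depth;
};

static void enqueue_one(struct req_queue *q)
{
	spin_lock_bh(&q->lock);		/* short, never sleeps */
	q->depth++;
	spin_unlock_bh(&q->lock);
}
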
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index a37622060fff..4852a2dbdb27 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -11,9 +11,17 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/memory-tiers.h>
#include "dax-private.h"
#include "bus.h"
+/*
+ * Default abstract distance assigned to the NUMA node onlined
+ * by DAX/kmem if the low level platform driver didn't initialize
+ * one for this NUMA node.
+ */
+#define MEMTIER_DEFAULT_DAX_ADISTANCE (MEMTIER_ADISTANCE_DRAM * 5)
+
/* Memory resource name used for add_memory_driver_managed(). */
static const char *kmem_name;
/* Set if any memory will remain added when the driver will be unloaded. */
@@ -41,6 +49,7 @@ struct dax_kmem_data {
struct resource *res[];
};
+static struct memory_dev_type *dax_slowmem_type;
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
@@ -79,11 +88,13 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
+ init_node_memory_type(numa_node, dax_slowmem_type);
+
+ rc = -ENOMEM;
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ goto err_dax_kmem_data;
- rc = -ENOMEM;
data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!data->res_name)
goto err_res_name;
@@ -155,6 +166,8 @@ err_reg_mgid:
kfree(data->res_name);
err_res_name:
kfree(data);
+err_dax_kmem_data:
+ clear_node_memory_type(numa_node, dax_slowmem_type);
return rc;
}
@@ -162,6 +175,7 @@ err_res_name:
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
int i, success = 0;
+ int node = dev_dax->target_node;
struct device *dev = &dev_dax->dev;
struct dax_kmem_data *data = dev_get_drvdata(dev);
@@ -198,6 +212,14 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
kfree(data->res_name);
kfree(data);
dev_set_drvdata(dev, NULL);
+ /*
+ * Clear the memtype association on successful unplug.
+ * If not, we have memory blocks left which can be
+ * offlined/onlined later. We need to keep memory_dev_type
+ * for that. This implies this reference will be around
+ * till next reboot.
+ */
+ clear_node_memory_type(node, dax_slowmem_type);
}
}
#else
@@ -228,9 +250,22 @@ static int __init dax_kmem_init(void)
if (!kmem_name)
return -ENOMEM;
+ dax_slowmem_type = alloc_memory_type(MEMTIER_DEFAULT_DAX_ADISTANCE);
+ if (IS_ERR(dax_slowmem_type)) {
+ rc = PTR_ERR(dax_slowmem_type);
+ goto err_dax_slowmem_type;
+ }
+
rc = dax_driver_register(&device_dax_kmem_driver);
if (rc)
- kfree_const(kmem_name);
+ goto error_dax_driver;
+
+ return rc;
+
+error_dax_driver:
+ destroy_memory_type(dax_slowmem_type);
+err_dax_slowmem_type:
+ kfree_const(kmem_name);
return rc;
}
@@ -239,6 +274,7 @@ static void __exit dax_kmem_exit(void)
dax_driver_unregister(&device_dax_kmem_driver);
if (!any_hotremove_failed)
kfree_const(kmem_name);
+ destroy_memory_type(dax_slowmem_type);
}
MODULE_AUTHOR("Intel Corporation");
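
dax_kmem_init() above grows a second resource (the memory_dev_type) and restructures its error path into the usual reverse-order unwind, one label per acquired resource. A generic sketch of that shape; the step/undo functions are hypothetical:

extern int step_a(void);		/* e.g. allocate a name */
extern int step_b(void);		/* e.g. alloc_memory_type() */
extern int step_c(void);		/* e.g. register the driver */
extern void undo_a(void), undo_b(void);

static int init_all(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;
	rc = step_b();
	if (rc)
		goto err_b;
	rc = step_c();
	if (rc)
		goto err_c;
	return 0;

err_c:
	undo_b();			/* release in reverse order */
err_b:
	undo_a();
	return rc;
}
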
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 0eb6b617f709..015c95a825d3 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -567,8 +567,13 @@ static int __init dmi_present(const u8 *buf)
{
u32 smbios_ver;
+ /*
+ * The size of this structure is 31 bytes, but we also accept value
+ * 30 due to a mistake in SMBIOS specification version 2.1.
+ */
if (memcmp(buf, "_SM_", 4) == 0 &&
- buf[5] < 32 && dmi_checksum(buf, buf[5])) {
+ buf[5] >= 30 && buf[5] <= 32 &&
+ dmi_checksum(buf, buf[5])) {
smbios_ver = get_unaligned_be16(buf + 6);
smbios_entry_point_size = buf[5];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
@@ -629,7 +634,8 @@ static int __init dmi_present(const u8 *buf)
static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
- buf[6] < 32 && dmi_checksum(buf, buf[6])) {
+ buf[6] >= 24 && buf[6] <= 32 &&
+ dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 11857af72859..9624735f1575 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -59,7 +59,7 @@ static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
struct mm_struct efi_mm = {
- .mm_rb = RB_ROOT,
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 01a01be4a2a0..b1601aad7e1a 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -57,6 +57,7 @@ GCOV_PROFILE := n
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index ed9e71d6713e..a01af1180616 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -990,20 +990,6 @@ endmenu
menu "I2C GPIO expanders"
depends on I2C
-config GPIO_ADP5588
- tristate "ADP5588 I2C GPIO expander"
- help
- This option enables support for 18 GPIOs found
- on Analog Devices ADP5588 GPIO Expanders.
-
-config GPIO_ADP5588_IRQ
- bool "Interrupt controller support for ADP5588"
- depends on GPIO_ADP5588=y
- select GPIOLIB_IRQCHIP
- help
- Say yes here to enable the adp5588 to be used as an interrupt
- controller. It requires the driver to be built in the kernel.
-
config GPIO_ADNP
tristate "Avionic Design N-bit GPIO expander"
depends on OF_GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b67e29d348cf..29e3beb6548c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
-obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_AGGREGATOR) += gpio-aggregator.o
obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
deleted file mode 100644
index 9b562dbbd733..000000000000
--- a/drivers/gpio/gpio-adp5588.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * GPIO Chip driver for Analog Devices
- * ADP5588/ADP5587 I/O Expander and QWERTY Keypad Controller
- *
- * Copyright 2009-2010 Analog Devices Inc.
- */
-
-#include <linux/gpio/driver.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/adp5588.h>
-
-/*
- * Early pre 4.0 Silicon required to delay readout by at least 25ms,
- * since the Event Counter Register updated 25ms after the interrupt
- * asserted.
- */
-#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
-
-struct adp5588_gpio {
- struct i2c_client *client;
- struct gpio_chip gpio_chip;
- struct mutex lock; /* protect cached dir, dat_out */
- /* protect serialized access to the interrupt controller bus */
- struct mutex irq_lock;
- uint8_t dat_out[3];
- uint8_t dir[3];
- uint8_t int_lvl_low[3];
- uint8_t int_lvl_high[3];
- uint8_t int_en[3];
- uint8_t irq_mask[3];
- uint8_t int_input_en[3];
-};
-
-static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
-{
- int ret = i2c_smbus_read_byte_data(client, reg);
-
- if (ret < 0)
- dev_err(&client->dev, "Read Error\n");
-
- return ret;
-}
-
-static int adp5588_gpio_write(struct i2c_client *client, u8 reg, u8 val)
-{
- int ret = i2c_smbus_write_byte_data(client, reg, val);
-
- if (ret < 0)
- dev_err(&client->dev, "Write Error\n");
-
- return ret;
-}
-
-static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
-{
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
- unsigned bank = ADP5588_BANK(off);
- unsigned bit = ADP5588_BIT(off);
- int val;
-
- mutex_lock(&dev->lock);
-
- if (dev->dir[bank] & bit)
- val = dev->dat_out[bank];
- else
- val = adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + bank);
-
- mutex_unlock(&dev->lock);
-
- return !!(val & bit);
-}
-
-static void adp5588_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
-{
- unsigned bank, bit;
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
-
- bank = ADP5588_BANK(off);
- bit = ADP5588_BIT(off);
-
- mutex_lock(&dev->lock);
- if (val)
- dev->dat_out[bank] |= bit;
- else
- dev->dat_out[bank] &= ~bit;
-
- adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
- dev->dat_out[bank]);
- mutex_unlock(&dev->lock);
-}
-
-static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
-{
- int ret;
- unsigned bank;
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
-
- bank = ADP5588_BANK(off);
-
- mutex_lock(&dev->lock);
- dev->dir[bank] &= ~ADP5588_BIT(off);
- ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
- mutex_unlock(&dev->lock);
-
- return ret;
-}
-
-static int adp5588_gpio_direction_output(struct gpio_chip *chip,
- unsigned off, int val)
-{
- int ret;
- unsigned bank, bit;
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
-
- bank = ADP5588_BANK(off);
- bit = ADP5588_BIT(off);
-
- mutex_lock(&dev->lock);
- dev->dir[bank] |= bit;
-
- if (val)
- dev->dat_out[bank] |= bit;
- else
- dev->dat_out[bank] &= ~bit;
-
- ret = adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
- dev->dat_out[bank]);
- ret |= adp5588_gpio_write(dev->client, GPIO_DIR1 + bank,
- dev->dir[bank]);
- mutex_unlock(&dev->lock);
-
- return ret;
-}
-
-#ifdef CONFIG_GPIO_ADP5588_IRQ
-
-static void adp5588_irq_bus_lock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
-
- mutex_lock(&dev->irq_lock);
-}
-
- /*
- * genirq core code can issue chip->mask/unmask from atomic context.
- * This doesn't work for slow busses where an access needs to sleep.
- * bus_sync_unlock() is therefore called outside the atomic context,
- * syncs the current irq mask state with the slow external controller
- * and unlocks the bus.
- */
-
-static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
- int i;
-
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
- if (dev->int_input_en[i]) {
- mutex_lock(&dev->lock);
- dev->dir[i] &= ~dev->int_input_en[i];
- dev->int_input_en[i] = 0;
- adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
- dev->dir[i]);
- mutex_unlock(&dev->lock);
- }
-
- if (dev->int_en[i] ^ dev->irq_mask[i]) {
- dev->int_en[i] = dev->irq_mask[i];
- adp5588_gpio_write(dev->client, GPI_EM1 + i,
- dev->int_en[i]);
- }
- }
-
- mutex_unlock(&dev->irq_lock);
-}
-
-static void adp5588_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
-
- dev->irq_mask[ADP5588_BANK(d->hwirq)] &= ~ADP5588_BIT(d->hwirq);
-}
-
-static void adp5588_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
-
- dev->irq_mask[ADP5588_BANK(d->hwirq)] |= ADP5588_BIT(d->hwirq);
-}
-
-static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
- uint16_t gpio = d->hwirq;
- unsigned bank, bit;
-
- bank = ADP5588_BANK(gpio);
- bit = ADP5588_BIT(gpio);
-
- dev->int_lvl_low[bank] &= ~bit;
- dev->int_lvl_high[bank] &= ~bit;
-
- if (type & IRQ_TYPE_EDGE_BOTH || type & IRQ_TYPE_LEVEL_HIGH)
- dev->int_lvl_high[bank] |= bit;
-
- if (type & IRQ_TYPE_EDGE_BOTH || type & IRQ_TYPE_LEVEL_LOW)
- dev->int_lvl_low[bank] |= bit;
-
- dev->int_input_en[bank] |= bit;
-
- return 0;
-}
-
-static struct irq_chip adp5588_irq_chip = {
- .name = "adp5588",
- .irq_mask = adp5588_irq_mask,
- .irq_unmask = adp5588_irq_unmask,
- .irq_bus_lock = adp5588_irq_bus_lock,
- .irq_bus_sync_unlock = adp5588_irq_bus_sync_unlock,
- .irq_set_type = adp5588_irq_set_type,
-};
-
-static irqreturn_t adp5588_irq_handler(int irq, void *devid)
-{
- struct adp5588_gpio *dev = devid;
- int status = adp5588_gpio_read(dev->client, INT_STAT);
-
- if (status & ADP5588_KE_INT) {
- int ev_cnt = adp5588_gpio_read(dev->client, KEY_LCK_EC_STAT);
-
- if (ev_cnt > 0) {
- int i;
-
- for (i = 0; i < (ev_cnt & ADP5588_KEC); i++) {
- int key = adp5588_gpio_read(dev->client,
- Key_EVENTA + i);
- /* GPIN events begin at 97,
- * bit 7 indicates logic level
- */
- int gpio = (key & 0x7f) - 97;
- int lvl = key & (1 << 7);
- int bank = ADP5588_BANK(gpio);
- int bit = ADP5588_BIT(gpio);
-
- if ((lvl && dev->int_lvl_high[bank] & bit) ||
- (!lvl && dev->int_lvl_low[bank] & bit))
- handle_nested_irq(irq_find_mapping(
- dev->gpio_chip.irq.domain, gpio));
- }
- }
- }
-
- adp5588_gpio_write(dev->client, INT_STAT, status); /* Status is W1C */
-
- return IRQ_HANDLED;
-}
-
-
-static int adp5588_irq_init_hw(struct gpio_chip *gc)
-{
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
- /* Enable IRQs after registering chip */
- adp5588_gpio_write(dev->client, CFG,
- ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_KE_IEN);
-
- return 0;
-}
-
-static int adp5588_irq_setup(struct adp5588_gpio *dev)
-{
- struct i2c_client *client = dev->client;
- int ret;
- struct adp5588_gpio_platform_data *pdata =
- dev_get_platdata(&client->dev);
- struct gpio_irq_chip *girq;
-
- adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
- adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */
-
- mutex_init(&dev->irq_lock);
-
- ret = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, adp5588_irq_handler, IRQF_ONESHOT
- | IRQF_TRIGGER_FALLING | IRQF_SHARED,
- dev_name(&client->dev), dev);
- if (ret) {
- dev_err(&client->dev, "failed to request irq %d\n",
- client->irq);
- return ret;
- }
-
- /* This will be registered in the call to devm_gpiochip_add_data() */
- girq = &dev->gpio_chip.irq;
- girq->chip = &adp5588_irq_chip;
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->first = pdata ? pdata->irq_base : 0;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
- girq->init_hw = adp5588_irq_init_hw;
- girq->threaded = true;
-
- return 0;
-}
-
-#else
-static int adp5588_irq_setup(struct adp5588_gpio *dev)
-{
- struct i2c_client *client = dev->client;
- dev_warn(&client->dev, "interrupt support not compiled in\n");
-
- return 0;
-}
-
-#endif /* CONFIG_GPIO_ADP5588_IRQ */
-
-static int adp5588_gpio_probe(struct i2c_client *client)
-{
- struct adp5588_gpio_platform_data *pdata =
- dev_get_platdata(&client->dev);
- struct adp5588_gpio *dev;
- struct gpio_chip *gc;
- int ret, i, revid;
- unsigned int pullup_dis_mask = 0;
-
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
- return -EIO;
- }
-
- dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->client = client;
-
- gc = &dev->gpio_chip;
- gc->direction_input = adp5588_gpio_direction_input;
- gc->direction_output = adp5588_gpio_direction_output;
- gc->get = adp5588_gpio_get_value;
- gc->set = adp5588_gpio_set_value;
- gc->can_sleep = true;
- gc->base = -1;
- gc->parent = &client->dev;
-
- if (pdata) {
- gc->base = pdata->gpio_start;
- gc->names = pdata->names;
- pullup_dis_mask = pdata->pullup_dis_mask;
- }
-
- gc->ngpio = ADP5588_MAXGPIO;
- gc->label = client->name;
- gc->owner = THIS_MODULE;
-
- mutex_init(&dev->lock);
-
- ret = adp5588_gpio_read(dev->client, DEV_ID);
- if (ret < 0)
- return ret;
-
- revid = ret & ADP5588_DEVICE_ID_MASK;
-
- for (i = 0, ret = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
- dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
- dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
- ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
- ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
- (pullup_dis_mask >> (8 * i)) & 0xFF);
- ret |= adp5588_gpio_write(client, GPIO_INT_EN1 + i, 0);
- if (ret)
- return ret;
- }
-
- if (client->irq) {
- if (WA_DELAYED_READOUT_REVID(revid)) {
- dev_warn(&client->dev, "GPIO int not supported\n");
- } else {
- ret = adp5588_irq_setup(dev);
- if (ret)
- return ret;
- }
- }
-
- ret = devm_gpiochip_add_data(&client->dev, &dev->gpio_chip, dev);
- if (ret)
- return ret;
-
- i2c_set_clientdata(client, dev);
-
- return 0;
-}
-
-static void adp5588_gpio_remove(struct i2c_client *client)
-{
- struct adp5588_gpio *dev = i2c_get_clientdata(client);
-
- if (dev->client->irq)
- free_irq(dev->client->irq, dev);
-}
-
-static const struct i2c_device_id adp5588_gpio_id[] = {
- { "adp5588-gpio" },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
-
-static const struct of_device_id adp5588_gpio_of_id[] = {
- { .compatible = "adi,adp5588-gpio" },
- {}
-};
-MODULE_DEVICE_TABLE(of, adp5588_gpio_of_id);
-
-static struct i2c_driver adp5588_gpio_driver = {
- .driver = {
- .name = "adp5588-gpio",
- .of_match_table = adp5588_gpio_of_id,
- },
- .probe_new = adp5588_gpio_probe,
- .remove = adp5588_gpio_remove,
- .id_table = adp5588_gpio_id,
-};
-
-module_i2c_driver(adp5588_gpio_driver);
-
-MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
-MODULE_DESCRIPTION("GPIO ADP5588 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 64cb060d9d75..77a41151c921 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -273,10 +273,8 @@ static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr)
pending = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CAUSE_EVTEN0);
writel(pending, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
- for_each_set_bit(level, &pending, gc->ngpio) {
- int gpio_irq = irq_find_mapping(gc->irq.domain, level);
- generic_handle_irq(gpio_irq);
- }
+ for_each_set_bit(level, &pending, gc->ngpio)
+ generic_handle_domain_irq_safe(gc->irq.domain, level);
return IRQ_RETVAL(pending);
}
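
The mlxbf2 handler above collapses the manual irq_find_mapping()/generic_handle_irq() pair into one generic_handle_domain_irq_safe() call per pending bit. A plain-C sketch of the demux loop, with a stand-in for the domain call:

#include <stdint.h>

struct irq_domain;
/* Stand-in for generic_handle_domain_irq_safe(domain, hwirq). */
extern void handle_domain_irq(struct irq_domain *d, unsigned int hwirq);

static void demux_pending(struct irq_domain *d, uint32_t pending,
			  unsigned int ngpio)
{
	unsigned int bit;

	/* Forward each latched cause bit by hardware irq number. */
	for (bit = 0; bit < ngpio && bit < 32; bit++)
		if (pending & (UINT32_C(1) << bit))
			handle_domain_irq(d, bit);
}
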
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 6765477edb06..870910bb9dd3 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/regmap.h>
@@ -156,6 +157,12 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
unsigned long flags;
u32 data = input ? 0 : 1;
+
+ if (input)
+ pinctrl_gpio_direction_input(bank->pin_base + offset);
+ else
+ pinctrl_gpio_direction_output(bank->pin_base + offset);
+
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
raw_spin_unlock_irqrestore(&bank->slock, flags);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 285ecbf107c9..a7d2358736fe 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -754,6 +754,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.pin_config = agpio->pin_config;
lookup->info.debounce = agpio->debounce_timeout;
lookup->info.gpioint = gpioint;
+ lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
/*
* Polarity and triggering are only specified for GpioInt
@@ -1000,10 +1001,11 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
}
/**
- * acpi_dev_gpio_irq_get_by() - Find GpioInt and translate it to Linux IRQ number
+ * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
* @adev: pointer to a ACPI device to get IRQ from
* @name: optional name of GpioInt resource
* @index: index of GpioInt resource (starting from %0)
+ * @wake_capable: Set to true if the IRQ is wake capable
*
* If the device has one or more GpioInt resources, this function can be
* used to translate from the GPIO offset in the resource to the Linux IRQ
@@ -1015,9 +1017,13 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
* The function takes optional @name parameter. If the resource has a property
* name, then only those will be taken into account.
*
+ * The GPIO is considered wake capable if the GpioInt resource specifies
+ * SharedAndWake or ExclusiveAndWake.
+ *
* Return: Linux IRQ number (> %0) on success, negative errno on failure.
*/
-int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index)
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+ bool *wake_capable)
{
int idx, i;
unsigned int irq_flags;
@@ -1074,13 +1080,16 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind
dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
}
+ if (wake_capable)
+ *wake_capable = info.wake_capable;
+
return irq;
}
}
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get_by);
+EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by);
static acpi_status
acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index e476558d9471..1ac6816839db 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -18,6 +18,7 @@ struct acpi_device;
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @wake_capable: wake capability as provided by ACPI
* @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
@@ -28,6 +29,7 @@ struct acpi_gpio_info {
int pin_config;
int polarity;
int triggering;
+ bool wake_capable;
unsigned int debounce;
unsigned int quirks;
};
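
Callers of the renamed acpi_dev_gpio_irq_wake_get_by() pass an optional bool and learn whether the GpioInt was declared SharedAndWake or ExclusiveAndWake. A hedged usage sketch; the surrounding driver code is hypothetical:

#include <stdbool.h>

struct acpi_device;
extern int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev,
					 const char *name, int index,
					 bool *wake_capable);

static int probe_gpio_irq(struct acpi_device *adev)
{
	bool wake_capable;
	int irq;

	irq = acpi_dev_gpio_irq_wake_get_by(adev, NULL, 0, &wake_capable);
	if (irq < 0)
		return irq;

	/* A driver might now call enable_irq_wake(irq) when wake_capable. */
	return irq;
}
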
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 9e98f3866edc..03bbfaa51cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -75,9 +75,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
adev->kfd.dev = kgd2kfd_probe(adev, vf);
-
- if (adev->kfd.dev)
- amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -201,6 +198,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
adev_to_drm(adev), &gpu_resources);
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
}
}
@@ -210,6 +209,7 @@ void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
if (adev->kfd.dev) {
kgd2kfd_device_exit(adev->kfd.dev);
adev->kfd.dev = NULL;
+ amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 23998f727c7f..1a06b8d724f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,8 +38,6 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -500,12 +498,6 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
-static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
- .dirty = drm_atomic_helper_dirtyfb,
-};
-
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1108,10 +1100,8 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- if (drm_drv_uses_atomic_modeset(dev))
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
- else
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6a9b9fc9e0b..2e8f6cd7a729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -688,13 +688,16 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
- bp->destroy = &amdgpu_bo_vm_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+ /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+ * is initialized.
+ */
+ bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ccebd8e2a2d8..2dad7aa9a03b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2877,9 +2877,9 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
err_data.err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
- if(!err_data.err_addr) {
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record in mca notifier!\n");
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error record in mca notifier!\n");
return NOTIFY_DONE;
}
@@ -2889,7 +2889,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
if (adev->umc.ras &&
adev->umc.ras->convert_ras_error_address)
adev->umc.ras->convert_ras_error_address(adev,
- &err_data, 0, ch_inst, umc_inst, m->addr);
+ &err_data, m->addr, ch_inst, umc_inst);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 3949b7e3907f..ea5278f094c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -222,8 +222,10 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
adev->sdma.instance[instance].fw->data;
version_major = le16_to_cpu(header->header_version_major);
- if ((duplicate && instance) || (!duplicate && version_major > 1))
- return -EINVAL;
+ if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+ err = -EINVAL;
+ goto out;
+ }
err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
if (err)
@@ -272,7 +274,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
break;
default:
- return -EINVAL;
+ err = -EINVAL;
}
}
@@ -283,3 +285,24 @@ out:
}
return err;
}
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue) {
+ sdma = &adev->sdma.instance[i].page;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+ sdma = &adev->sdma.instance[i].ring;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d2d88279fefb..7d99205c2e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
char *fw_name, u32 instance, bool duplicate);
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b1c455329023..dc262d2c2925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -424,8 +424,9 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
struct amdgpu_res_cursor cursor;
+ u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -434,12 +435,18 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
+ end = cursor.start + cursor.size;
+ while (cursor.remaining) {
+ amdgpu_res_next(&cursor, cursor.size);
- /* ttm_resource_ioremap only supports contiguous memory */
- if (cursor.size != mem_size)
- return false;
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (end != cursor.start)
+ return false;
+
+ end = cursor.start + cursor.size;
+ }
- return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
+ return end <= adev->gmc.visible_vram_size;
}
/*
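
amdgpu_mem_visible() now walks every cursor segment instead of assuming the first one covers the whole resource: each segment must begin exactly where the previous ended, and the final end must sit inside visible VRAM. A standalone sketch of the contiguity walk:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct seg {
	uint64_t start;
	uint64_t size;
};

static bool contiguous_below(const struct seg *s, size_t n, uint64_t limit)
{
	uint64_t end;
	size_t i;

	if (n == 0)
		return false;

	end = s[0].start + s[0].size;
	for (i = 1; i < n; i++) {
		if (s[i].start != end)	/* gap: not ioremap-able as one range */
			return false;
		end = s[i].start + s[i].size;
	}
	return end <= limit;		/* whole range inside visible VRAM */
}
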
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 2fb4951a6433..e46439274f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,8 +22,6 @@
#define __AMDGPU_UMC_H__
#include "amdgpu_ras.h"
-#define UMC_INVALID_ADDR 0x1ULL
-
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
@@ -54,9 +52,8 @@ struct amdgpu_umc_ras {
void (*err_cnt_init)(struct amdgpu_device *adev);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
void (*convert_ras_error_address)(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t ch_inst,
- uint32_t umc_inst, uint64_t mca_addr);
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5647f13b98d4..cbca9866645c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
*/
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6bdffdc1c0b9..c52d246a1d96 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2584fa3cb13e..486d9b5c1b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7241a9fb0121..298fa11702e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].ring;
+ int i;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].page;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -1954,8 +1940,11 @@ static int sdma_v4_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index c05c3eebde4c..d4d9f196db83 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1460,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f136fec7b4f4..809eca54fc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
- struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
- struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1) ||
- (adev->mman.buffer_funcs_ring == sdma2) ||
- (adev->mman.buffer_funcs_ring == sdma3))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1357,8 +1349,11 @@ static int sdma_v5_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_2_ctx_switch_enable(adev, false);
sdma_v5_2_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index db51230163c5..da3beb0bf2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -846,7 +839,8 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_cntl =
order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
@@ -1317,8 +1311,11 @@ static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f675111ace20..4d5e718540aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
u32 rb_cntl;
unsigned i;
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
}
}
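
Every one of the gfx_stop/page_stop/dma_stop variants above used to open-code the check for whether one of its rings owned adev->mman.buffer_funcs_ring before dropping the TTM buffer funcs; they now all call amdgpu_sdma_unset_buffer_funcs_helper(). The helper's definition is not part of this excerpt; a minimal sketch of what the call sites imply it does, assuming the usual ring/page-queue layout:

void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->sdma.has_page_queue) {
			sdma = &adev->sdma.instance[i].page;
			if (adev->mman.buffer_funcs_ring == sdma) {
				amdgpu_ttm_set_buffer_funcs_status(adev, false);
				return;
			}
		}
		sdma = &adev->sdma.instance[i].ring;
		if (adev->mman.buffer_funcs_ring == sdma) {
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
			return;
		}
	}
}

This also explains the new hw_fini hunks: on SR-IOV virtual functions the host owns the engine stop, but the guest still has to detach the buffer funcs so the scheduler is not used while SDMA is down.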
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 16b757664a35..795706b3b092 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -629,6 +629,7 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 939cb203f7ad..f17d297b594b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -327,10 +327,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -343,10 +342,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
+ amdgpu_umc_fill_error_record(err_data, err_addr,
retired_page, channel_index, umc_inst);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index a0d19b768346..5d5d031c9e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint32_t channel_index;
+ uint64_t soc_pa, retired_page, column;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* The umc channel bits are not original values, they are hashed */
+ SET_CHANNEL_HASH(channel_index, soc_pa);
+
+ /* clear [C4 C3 C2] in soc physical address */
+ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+
+ /* shift R14 bit */
+ retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
+}
+
static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -209,42 +240,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -453,81 +457,40 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset, uint32_t ch_inst,
- uint32_t umc_inst, uint64_t mca_addr)
+ uint32_t umc_inst)
{
uint32_t mc_umc_status_addr;
- uint32_t channel_index;
- uint64_t mc_umc_status = 0, mc_umc_addrt0;
- uint64_t err_addr, soc_pa, retired_page, column;
+ uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
- if (mca_addr == UMC_INVALID_ADDR) {
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- mc_umc_addrt0 =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
- if (mc_umc_status == 0)
- return;
+ if (mc_umc_status == 0)
+ return;
- if (!err_data->err_addr) {
- /* clear umc status */
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
- return;
- }
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
}
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
- /* calculate error address if ue/ce error is detected */
- if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) ||
- mca_addr != UMC_INVALID_ADDR) {
- if (mca_addr == UMC_INVALID_ADDR) {
- err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
- err_addr =
- REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- } else {
- err_addr = mca_addr;
- }
+ /* calculate error address if ue error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ err_addr =
+ REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1 ||
- mca_addr != UMC_INVALID_ADDR) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
- if (mca_addr == UMC_INVALID_ADDR)
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -549,7 +512,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
umc_v6_7_query_error_address(adev,
err_data,
umc_reg_offset, ch_inst,
- umc_inst, UMC_INVALID_ADDR);
+ umc_inst);
}
}
@@ -590,5 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
- .convert_ras_error_address = umc_v6_7_query_error_address,
+ .convert_ras_error_address = umc_v6_7_convert_error_address,
};
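
Two things happen in the umc_v6_7 hunks: CECC-only errors no longer reach the address path (the UECC check moved up into the gate), and the normal-address-to-physical-address expansion is factored into umc_v6_7_convert_error_address(), which is also what the .convert_ras_error_address hook now points at. The expansion emits 2 * UMC_V6_7_NA_MAP_PA_NUM candidate retired pages per error: one per [C4 C3 C2] column value, each paired with its R14-flipped mirror. A standalone userspace sketch of that loop, with the bit positions assumed for illustration only:

#include <stdint.h>
#include <stdio.h>

#define NA_MAP_PA_NUM	8	/* all values of [C4 C3 C2] */
#define PA_C2_BIT	17	/* assumed positions, for illustration only */
#define PA_R14_BIT	34

static void expand_retired_pages(uint64_t soc_pa)
{
	uint64_t page, column;

	soc_pa &= ~(0x7ULL << PA_C2_BIT);	/* clear [C4 C3 C2] */
	for (column = 0; column < NA_MAP_PA_NUM; column++) {
		page = soc_pa | (column << PA_C2_BIT);
		printf("candidate PA: 0x%llx\n", (unsigned long long)page);
		page ^= 0x1ULL << PA_R14_BIT;	/* R14 mirror */
		printf("candidate PA: 0x%llx\n", (unsigned long long)page);
	}
}

int main(void)
{
	expand_retired_pages(0x123456789ULL);	/* prints 16 candidates */
	return 0;
}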
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index a8cbda81828d..91235df54e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -208,7 +208,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
- uint32_t channel_index;
+ uint64_t mc_umc_addrt0, na_err_addr_base;
+ uint64_t na_err_addr, retired_page_addr;
+ uint32_t channel_index, addr_lsb, col = 0;
+ int ret = 0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -229,13 +232,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
umc_inst * adev->umc.channel_inst_num +
ch_inst];
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- uint32_t addr_lsb;
- uint64_t mc_umc_addrt0;
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -243,32 +243,24 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
/* the lowest lsb bits should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
-
err_addr &= ~((0x1ULL << addr_lsb) - 1);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
- uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
- uint64_t na_err_addr, retired_page_addr;
- uint32_t col = 0;
- int ret = 0;
-
- /* loop for all possibilities of [C6 C5] in normal address. */
- for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
- na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
- /* Mapping normal error address to retired soc physical address. */
- ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
- na_err_addr, &retired_page_addr);
- if (ret) {
- dev_err(adev->dev, "Failed to map pa from umc na.\n");
- break;
- }
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
- retired_page_addr);
- amdgpu_umc_fill_error_record(err_data, na_err_addr,
- retired_page_addr, channel_index, umc_inst);
+ na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+ /* loop for all possibilities of [C6 C5] in normal address. */
+ for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+ na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+ /* Mapping normal error address to retired soc physical address. */
+ ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+ na_err_addr, &retired_page_addr);
+ if (ret) {
+ dev_err(adev->dev, "Failed to map pa from umc na.\n");
+ break;
}
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+ retired_page_addr);
+ amdgpu_umc_fill_error_record(err_data, na_err_addr,
+ retired_page_addr, channel_index, umc_inst);
}
}
@@ -338,6 +330,31 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
}
}
+static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
+ struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+ ecc_ctrl_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
+ ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+ umc_reg_offset) * 4);
+
+ return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t umc_reg_offset = 0;
+
+ /* If fatal error reporting is enabled in umc node0 instance0
+ * channel0, the device is taken to be in fatal error mode
+ */
+ umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
+ return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+}
+
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -348,4 +365,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.hw_ops = &umc_v8_10_ras_hw_ops,
},
.err_cnt_init = umc_v8_10_err_cnt_init,
+ .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
};
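
The new poison-mode query samples a single representative channel (node 0, instance 0, channel 0) and inverts UCFatalEn: if fatal-error delivery is enabled there, the device is in fatal-error mode, otherwise in poison mode. Restated with REG_GET_FIELD reduced to a plain mask/shift (the mask and shift values are hardware-specific and not shown in this hunk):

#include <stdbool.h>
#include <stdint.h>

/* poison mode is active exactly when UCFatalEn reads back 0 */
static inline bool gecc_poison_mode(uint32_t ecc_ctrl,
				    uint32_t fatal_en_mask,
				    unsigned int fatal_en_shift)
{
	uint32_t uc_fatal_en = (ecc_ctrl & fatal_en_mask) >> fatal_en_shift;

	return uc_fatal_en == 0;
}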
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index f35253e0eaa6..b717fdaa46e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -108,20 +108,35 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v8_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint64_t retired_page;
+ uint32_t channel_index;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+}
+
static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, retired_page;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -130,24 +145,15 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -324,14 +330,12 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
uint32_t umc_inst)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ uint64_t mc_umc_status, err_addr, mc_umc_addrt0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
-
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
@@ -343,10 +347,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -354,16 +357,8 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
err_addr &= ~((0x1ULL << lsb) - 1);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 26b53b6d673e..4f6390f3236e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -333,7 +333,8 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
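
The same F32_WPTR_POLL_ENABLE bit is set in both MQD writers in this series (sdma_v6_0_mqd_init() above and KFD's update_mqd_sdma() here), so firmware polls the write pointer for these queues. For reference, the RB_CNTL image is just an OR of shifted fields; a sketch with illustrative shift values (the real ones come from the SDMA register headers):

#include <stdint.h>

/* illustrative shift values only */
#define RB_SIZE__SHIFT			1
#define RPTR_WRITEBACK_ENABLE__SHIFT	12
#define RPTR_WRITEBACK_TIMER__SHIFT	16
#define F32_WPTR_POLL_ENABLE__SHIFT	23
#define RB_VMID__SHIFT			24

static uint32_t mk_sdma_rb_cntl(uint32_t rb_size_order, uint32_t vmid)
{
	return rb_size_order << RB_SIZE__SHIFT |
	       vmid << RB_VMID__SHIFT |
	       1 << RPTR_WRITEBACK_ENABLE__SHIFT |
	       6 << RPTR_WRITEBACK_TIMER__SHIFT |
	       1 << F32_WPTR_POLL_ENABLE__SHIFT;	/* the bit added here */
}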
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4c73727e0b7d..c053cb79cd06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1110,7 +1110,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb[i] = &fb_info->fb[i];
switch (adev->ip_versions[DCE_HWIP][0]) {
- case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -7478,15 +7479,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- dm_set_vupdate_irq(new_state->base.crtc, true);
- drm_crtc_vblank_get(new_state->base.crtc);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- dm_set_vupdate_irq(new_state->base.crtc, false);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -8242,23 +8243,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
- /* Count number of newly disabled CRTCs for dropping PM refs later. */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (old_crtc_state->active && !new_crtc_state->active)
- crtc_disable_count++;
-
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
- /* For freesync config update on crtc state and params for irq */
- update_stream_irq_parameters(dm, dm_new_crtc_state);
-
- /* Handle vrr on->off / off->on transitions */
- amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
- dm_new_crtc_state);
- }
-
/**
* Enable interrupts for CRTCs that are newly enabled or went through
* a modeset. It was intentionally deferred until after the front end
@@ -8268,16 +8252,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
- bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+ struct crc_rd_work *crc_rd_wrk;
+#endif
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ crc_rd_wrk = dm->crc_rd_wrk;
#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (new_crtc_state->active &&
(!old_crtc_state->active ||
@@ -8285,16 +8282,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
- configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
@@ -8306,14 +8306,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
- }
-
- if (configure_crc)
if (amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src))
DRM_DEBUG_DRIVER("Failed to configure crc source");
-#endif
+ }
}
+#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -9392,10 +9390,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
- if (!pre_validate_dsc(state, &dm_state, vars)) {
- ret = -EINVAL;
- goto fail;
- }
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -9529,6 +9523,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ if (!pre_validate_dsc(state, &dm_state, vars)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+#endif
+
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 8ca10ab3dfc1..26291db0a3cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -60,11 +60,15 @@ static bool link_supports_psrsu(struct dc_link *link)
*/
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
- if (link->type == dc_connection_none)
+ if (link->type == dc_connection_none) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
if (link->dpcd_caps.psr_info.psr_version == 0) {
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 53b077b40d72..ee0456b5e14e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -51,13 +51,6 @@
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
-struct i2c_id_config_access {
- uint8_t bfI2C_LineMux:4;
- uint8_t bfHW_EngineID:3;
- uint8_t bfHW_Capable:1;
- uint8_t ucAccess;
-};
-
static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
struct atom_i2c_record *record,
struct graphics_object_i2c_info *info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d30d1d9d67e..650f3b4b562e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -179,7 +179,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, 126);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -206,7 +206,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
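
For context on the two timeout bumps above: REG_WAIT(reg, field, value, delay_us, max_tries) polls the field until it reads back the value, so the budget is roughly delay_us * max_tries. Assuming that parameter order, which matches REG_WAIT's other uses in dc:

/* DENTIST_DISPCLK_CHG_DONE wait budgets at delay_us = 50:
 *   max_tries  100 ->  ~5 ms   (old, first call site)
 *   max_tries 1000 -> ~50 ms   (old, second call site)
 *   max_tries 2000 -> ~100 ms  (new, both call sites)
 */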
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 897105d1c111..ef0795b14a1f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -339,29 +339,24 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
if (!clk_mgr->smu_present)
return;
- if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
- (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
- support = DCN_ZSTATE_SUPPORT_DISALLOW;
-
-
// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
// Arg[16] = Disallow Z9 -> new bit
switch (support) {
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 9;
+ param = (1 << 10) | (1 << 9) | (1 << 8);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 8;
+ param = 0;
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 0x00010008;
+ param = (1 << 10);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
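
The AllowZstatesEntry argument stops being a small magic number and becomes an explicit bitmask, and the pre-check that downgraded ALLOW_Z10_ONLY to DISALLOW when the z9-disable debug interface was off is dropped. Reading the three cases back (bit meanings inferred from the cases above, not from a published register spec):

#define DCN314_ZSTATE_ALLOW_Z8		(1 << 8)
#define DCN314_ZSTATE_ALLOW_Z9		(1 << 9)
#define DCN314_ZSTATE_ALLOW_Z10		(1 << 10)

/* DCN_ZSTATE_SUPPORT_ALLOW           -> Z8 | Z9 | Z10 */
/* DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY  -> Z10 only      */
/* DCN_ZSTATE_SUPPORT_DISALLOW       -> 0              */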
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index f0f3f66629cc..1c612ccf1944 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,7 +156,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
- unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
@@ -180,27 +180,28 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
/* DCFCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
- &num_levels);
- num_dcfclk_levels = num_levels;
+ &num_entries_per_clk->num_dcfclk_levels);
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_socclk_levels);
+
/* DTBCLK */
if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
- &num_levels);
- num_dtbclk_levels = num_levels;
+ &num_entries_per_clk->num_dtbclk_levels);
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
- &num_levels);
- num_dispclk_levels = num_levels;
+ &num_entries_per_clk->num_dispclk_levels);
+ num_levels = num_entries_per_clk->num_dispclk_levels;
- if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+ if (num_entries_per_clk->num_dcfclk_levels &&
+ num_entries_per_clk->num_dtbclk_levels &&
+ num_entries_per_clk->num_dispclk_levels)
clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
@@ -333,6 +334,21 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
+ clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
+
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+
+ if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+ clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+
+ /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support) {
+ /* Notify PMFW that FCLK P-state change is supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
+ }
+ }
+
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
@@ -352,7 +368,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
@@ -361,27 +376,25 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
- clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+ /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
+ if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
+ update_fclk = true;
+ }
- /* To disable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
- }
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+ /* Notify PMFW that FCLK P-state change is not supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -390,21 +403,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
update_uclk = true;
}
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
- }
-
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
@@ -632,7 +635,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
@@ -648,22 +651,34 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
/* Get current memclk states, update bounding box */
static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
unsigned int num_levels;
if (!clk_mgr->smu_present)
return;
- /* Refresh memclk states */
+ /* Refresh memclk and fclk states */
dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_memclk_levels);
+
+ dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+ &num_entries_per_clk->num_fclk_levels);
+
+ if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
+ num_levels = num_entries_per_clk->num_memclk_levels;
+ } else {
+ num_levels = num_entries_per_clk->num_fclk_levels;
+ }
+
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
if (clk_mgr->dpm_present && !num_levels)
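
clk_table.num_entries used to be the only level count, which forced every clock to pretend it had the same DPM depth; num_entries_per_clk tracks each clock's own level count, and memclk indexing switches to num_memclk_levels accordingly. A sketch of the shape the uses above imply (the real definition lives in the clk_mgr headers):

struct clk_limit_num_entries {
	unsigned int num_dcfclk_levels;
	unsigned int num_fclk_levels;
	unsigned int num_memclk_levels;
	unsigned int num_socclk_levels;
	unsigned int num_dtbclk_levels;
	unsigned int num_dispclk_levels;
};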
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 258ba5a872b1..997ab031f816 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1734,10 +1734,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
struct dc_state *old_state;
+ bool subvp_prev_use = false;
dc_z10_restore(dc);
dc_allow_idle_optimizations(dc, false);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* Check old context for SubVP */
+ subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1777,6 +1787,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) {
@@ -1794,6 +1807,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
+
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2927,6 +2946,12 @@ static bool update_planes_and_stream_state(struct dc *dc,
dc_resource_state_copy_construct(
dc->current_state, context);
+ /* For each full update, remove all existing phantom pipes first.
+ * Ensures that we have enough pipes for newly added MPO planes
+ */
+ if (dc->res_pool->funcs->remove_phantom_pipes)
+ dc->res_pool->funcs->remove_phantom_pipes(dc, context);
+
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
@@ -3334,8 +3359,14 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ }
+
return;
}
@@ -3495,6 +3526,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
@@ -3542,6 +3576,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
bool force_minimal_pipe_splitting = false;
+ uint32_t i;
*is_plane_addition = false;
@@ -3573,6 +3608,36 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
}
}
+ /* For SubVP pipe split case when adding MPO video
+ * we need to add a minimal transition. In this case
+ * there will be 2 streams (1 main stream, 1 phantom
+ * stream).
+ */
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 2 &&
+ stream->mall_stream_config.type == SUBVP_MAIN) {
+ bool is_pipe_split = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+ (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+ dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+ is_pipe_split = true;
+ break;
+ }
+ }
+
+ /* determine if minimal transition is required due to SubVP */
+ if (surface_count > 0 && is_pipe_split) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
return force_minimal_pipe_splitting;
}
@@ -3582,6 +3647,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_context = dc_create_state(dc);
enum pipe_split_policy tmp_mpc_policy;
bool temp_dynamic_odm_policy;
+ bool temp_subvp_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
@@ -3596,6 +3662,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
dc->debug.enable_single_display_2to1_odm_policy = false;
+ temp_subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+
dc_resource_state_copy_construct(transition_base_context, transition_context);
//commit minimal state
@@ -3624,6 +3693,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
dc->debug.pipe_split_policy = tmp_mpc_policy;
dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
+ dc->debug.force_disable_subvp = temp_subvp_policy;
if (ret != DC_OK) {
/*this should never happen*/
@@ -4587,6 +4657,37 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
+ * dc_process_dmub_dpia_hpd_int_enable - Submits dpia hpd int enable command
+ *
+ * @dc: [in] dc structure
+ * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
+ *
+ * Submits dpia hpd int enable command to dmub via inbox message.
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
+/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
*/
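
A hypothetical call site for the new helper, just to show the calling convention (the surrounding function and where a DM would invoke it are illustrative):

static void example_resume_dpia_hpd(const struct dc *dc)
{
	/* re-arm DPIA HPD interrupts after resume; 0 would disable them */
	dc_process_dmub_dpia_hpd_int_enable(dc, 1);
}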
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3d19fb92333b..d7b1ace6328a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1307,7 +1307,10 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- // Init dc_panel_config
+ /* Init dc_panel_config by HW config */
+ if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+ dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+ /* Pickup base DM settings */
dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
// Override dc_panel_config if system has specific settings
dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
@@ -3143,7 +3146,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (allow_active && link->type == dc_connection_none) {
+ if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
// Don't enter PSR if panel is not connected
return false;
}
@@ -3375,8 +3378,8 @@ bool dc_link_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
- if(!dc->debug.disable_z10)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
+ if (dc->debug.disable_z10)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
default:
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index c57df45e83ff..1254d38f1778 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -944,6 +944,23 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
return status;
}
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum dc_status status = core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ sizeof(link_training_setting->dpcd_lane_settings));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ return status;
+}
+
+
enum dc_status dpcd_set_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
@@ -964,16 +981,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->link_settings.lane_count);
if (is_repeater(link_training_setting, offset)) {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- offset,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
@@ -985,14 +992,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
@@ -2023,7 +2022,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
result = DP_128b_132b_LT_FAILED;
} else {
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dpcd_set_lane_settings(link, lt_settings, DPRX);
+ dpcd_128b_132b_set_lane_settings(link, lt_settings);
}
loop_count++;
}
@@ -5090,6 +5089,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
(dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
ASSERT(0);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
@@ -5098,6 +5098,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
if (is_lttpr_present)
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
return is_lttpr_present;
}
@@ -5134,6 +5135,7 @@ void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
*override = LTTPR_MODE_NON_LTTPR;
}
+ DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -5146,22 +5148,34 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
if (vbios_lttpr_aware) {
- if (vbios_lttpr_force_non_transparent)
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
return LTTPR_MODE_NON_TRANSPARENT;
- else
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
return LTTPR_MODE_TRANSPARENT;
+ }
}
if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- link->dc->caps.extended_aux_timeout_support)
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
return LTTPR_MODE_NON_TRANSPARENT;
+ }
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
return LTTPR_MODE_NON_LTTPR;
}
enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
{
- return dp_is_lttpr_present(link) ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
+ enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+ if (dp_is_lttpr_present(link))
+ mode = LTTPR_MODE_NON_TRANSPARENT;
+
+ DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+ return mode;
}
static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
@@ -5179,9 +5193,10 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
- cmd.cable_id.header.ret_status == 1)
+ cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
-
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+ }
return cmd.cable_id.header.ret_status == 1;
}
@@ -5228,6 +5243,7 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
lttpr_present = dp_is_lttpr_present(link) ||
(!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
+ DC_LOG_DC("lttpr_present = %d.\n", lttpr_present ? 1 : 0);
/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
* be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
@@ -5795,7 +5811,7 @@ void detect_edp_sink_caps(struct dc_link *link)
* Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- (link->dc->debug.optimize_edp_link_rate ||
+ (link->panel_config.ilr.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -6744,7 +6760,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
ASSERT(link || crtc_timing); // invalid input
if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- !link->dc->debug.optimize_edp_link_rate)
+ !link->panel_config.ilr.optimize_edp_link_rate)
return false;
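
The dc_link_dp.c hunks above factor the 128b/132b lane-settings DPCD write into its own helper and add a DC_LOG_DC trace at each LTTPR mode decision point. A minimal standalone sketch of that decide-and-log shape; the caps struct and printf are stand-ins for the real link state and logger:

    #include <stdbool.h>
    #include <stdio.h>

    enum lttpr_mode {
        LTTPR_MODE_NON_LTTPR,
        LTTPR_MODE_TRANSPARENT,
        LTTPR_MODE_NON_TRANSPARENT,
    };

    /* Hypothetical capability bits standing in for VBIOS/link state. */
    struct link_caps {
        bool lttpr_present;
        bool vbios_lttpr_aware;
        bool vbios_force_non_transparent;
    };

    static enum lttpr_mode decide_lttpr_mode(const struct link_caps *caps)
    {
        if (!caps->lttpr_present) {
            printf("chose LTTPR_MODE_NON_LTTPR.\n");    /* DC_LOG_DC stand-in */
            return LTTPR_MODE_NON_LTTPR;
        }
        if (caps->vbios_lttpr_aware && caps->vbios_force_non_transparent) {
            printf("chose LTTPR_MODE_NON_TRANSPARENT (VBIOS forced).\n");
            return LTTPR_MODE_NON_TRANSPARENT;
        }
        printf("chose LTTPR_MODE_TRANSPARENT by default.\n");
        return LTTPR_MODE_TRANSPARENT;
    }

    int main(void)
    {
        struct link_caps caps = { .lttpr_present = true, .vbios_lttpr_aware = true };

        printf("mode = %d\n", (int)decide_lttpr_mode(&caps));
        return 0;
    }
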
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8ee0d946bb2f..fd8db482e56f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1747,7 +1747,6 @@ bool dc_remove_plane_from_context(
for (i = 0; i < stream_status->plane_count; i++) {
if (stream_status->plane_states[i] == plane_state) {
-
dc_plane_state_release(stream_status->plane_states[i]);
break;
}
@@ -3683,4 +3682,56 @@ bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
(stream->timing.h_sync_width % 2 == 0);
}
return divisible;
+}
+
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev;
+ const struct resource_pool *pool = dc->res_pool;
+
+ sec_top = sec_pipe->top_pipe;
+ sec_bottom = sec_pipe->bottom_pipe;
+ sec_next = sec_pipe->next_odm_pipe;
+ sec_prev = sec_pipe->prev_odm_pipe;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->top_pipe = sec_top;
+ sec_pipe->bottom_pipe = sec_bottom;
+ sec_pipe->next_odm_pipe = sec_next;
+ sec_pipe->prev_odm_pipe = sec_prev;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+#endif
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
+#endif
+ }
+
+ return true;
+}
\ No newline at end of file
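
The new dc_resource_acquire_secondary_pipe_for_mpc_odm above bulk-copies the primary pipe_ctx into the secondary and then restores the secondary's own index and tree links. A reduced sketch of that save/copy/restore idiom, with a hypothetical struct in place of the real pipe_ctx:

    #include <stdio.h>

    /* Reduced stand-in for struct pipe_ctx: payload plus tree links. */
    struct pipe {
        int idx;                    /* per-pipe identity, must survive the copy */
        int payload;                /* stands in for plane/stream programming */
        struct pipe *top, *bottom;  /* tree links that must NOT be cloned */
    };

    /* Copy pri into sec wholesale, then restore sec's identity and links. */
    static void clone_pipe_keep_links(struct pipe *sec, const struct pipe *pri)
    {
        struct pipe *top = sec->top, *bottom = sec->bottom;
        int idx = sec->idx;

        *sec = *pri;        /* bulk struct copy clobbers links and idx */

        sec->top = top;     /* restore what must stay per-pipe */
        sec->bottom = bottom;
        sec->idx = idx;
    }

    int main(void)
    {
        struct pipe a = { .idx = 0, .payload = 42 };
        struct pipe b = { .idx = 1, .top = &a };

        clone_pipe_keep_links(&b, &a);
        printf("idx=%d payload=%d top kept=%d\n", b.idx, b.payload, b.top == &a);
        return 0;
    }
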
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index ae13887756bf..38d71b5c1f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -276,6 +276,8 @@ static void program_cursor_attributes(
}
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
@@ -382,6 +384,8 @@ static void program_cursor_position(
}
dc->hwss.set_cursor_position(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
if (pipe_to_program)
@@ -520,9 +524,9 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
/* remove writeback info for disabled writeback pipes from stream */
- for (i = 0, j = 0; i < stream->num_wb_info && j < MAX_DWB_PIPES; i++) {
+ for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (i != j)
+ if (j < i)
/* trim the array */
stream->writeback_info[j] = stream->writeback_info[i];
j++;
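
The dc_stream_remove_writeback fix above turns the trim loop into a stable in-place compaction: enabled entries shift down, order is preserved, and the self-copy is skipped while j == i. The same idiom as a self-contained sketch (wb_info here is a stand-in):

    #include <stdbool.h>
    #include <stdio.h>

    struct wb_info { bool enabled; int data; };

    /* Stable in-place compaction: keep enabled entries, preserve order,
     * return the new count. Mirrors the trimming loop in the hunk above. */
    static int trim_disabled(struct wb_info *arr, int n)
    {
        int i, j = 0;

        for (i = 0; i < n; i++) {
            if (arr[i].enabled) {
                if (j < i)          /* skip the pointless self-copy */
                    arr[j] = arr[i];
                j++;
            }
        }
        return j;
    }

    int main(void)
    {
        struct wb_info wb[4] = {
            { true, 1 }, { false, 2 }, { true, 3 }, { false, 4 },
        };
        int n = trim_disabled(wb, 4);

        for (int i = 0; i < n; i++)
            printf("%d ", wb[i].data);   /* prints: 1 3 */
        printf("\n");
        return 0;
    }
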
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2ecf36e6329b..bfc5474c0f4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.205"
+#define DC_VER "3.2.207"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -821,7 +821,6 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
- bool optimize_edp_link_rate; /* eDP ILR */
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
@@ -1192,6 +1191,8 @@ struct dc_plane_state {
enum dc_irq_source irq_source;
struct kref refcount;
struct tg_color visual_confirm_color;
+
+ bool is_statically_allocated;
};
struct dc_plane_info {
@@ -1611,6 +1612,9 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 89d7d3fd3321..0541e87e4f38 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -30,6 +30,7 @@
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
+#include "cursor_reg_cache.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -780,7 +781,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// Store the original watermark value for this SubVP config so we can lower it when the
// MCLK switch starts
wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
- dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 / 1000;
+ (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
@@ -880,3 +881,147 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
diag_data.is_cw0_enabled,
diag_data.is_cw6_enabled);
}
+
+static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state != NULL) {
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ return false;
+ }
+
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
+ pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
+ return true;
+
+ return false;
+}
+
+static void dc_build_cursor_update_payload0(
+ struct pipe_ctx *pipe_ctx, uint8_t p_idx,
+ struct dmub_cmd_update_cursor_payload0 *payload)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ unsigned int panel_inst = 0;
+
+ if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
+ pipe_ctx->stream->link, &panel_inst))
+ return;
+
+ /* Payload: Cursor Rect is built from position & attribute;
+ * x & y are obtained from position
+ */
+ payload->cursor_rect.x = hubp->cur_rect.x;
+ payload->cursor_rect.y = hubp->cur_rect.y;
+ /* w & h are obtained from attribute */
+ payload->cursor_rect.width = hubp->cur_rect.w;
+ payload->cursor_rect.height = hubp->cur_rect.h;
+
+ payload->enable = hubp->pos.cur_ctl.bits.cur_enable;
+ payload->pipe_idx = p_idx;
+ payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ payload->panel_inst = panel_inst;
+}
+
+static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
+ union dmub_rb_cmd *cmd)
+{
+ dc_dmub_srv_cmd_queue(dmub_srv, cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+}
+
+static void dc_build_cursor_position_update_payload0(
+ struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw;
+ pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
+ pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
+ pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;
+
+ /* dpp */
+ pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
+ pl->position_cfg.pipe_idx = p_idx;
+}
+
+static void dc_build_cursor_attribute_update_payload1(
+ struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
+ pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw;
+ pl_A->aHubp.size.raw = hubp->att.size.raw;
+ pl_A->aHubp.settings.raw = hubp->att.settings.raw;
+
+ /* dpp */
+ pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
+}
+
+/**
+ * dc_send_update_cursor_info_to_dmu - Populate the DMCUB cursor update info command
+ * @pCtx: pipe context
+ * @pipe_idx: pipe index
+ *
+ * Store the cursor-related information and pass it to the DMUB.
+ */
+void dc_send_update_cursor_info_to_dmu(
+ struct pipe_ctx *pCtx, uint8_t pipe_idx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ union dmub_cmd_update_cursor_info_data *update_cursor_info =
+ &cmd.update_cursor_info.update_cursor_info_data;
+
+ if (!dc_dmub_should_update_cursor_data(pCtx))
+ return;
+ /*
+ * Since we use multi_cmd_pending for the dmub command, the 2nd command
+ * only stores the cursor attributes info. The 1st command can be viewed
+ * as 2 parts: one for PSR/Replay data, the other for the cursor
+ * position info.
+ *
+ * The command header type must be the same when using multi_cmd_pending.
+ * Also, while the DMU processes the 2nd command the sub type is unused,
+ * so there is no point in passing a sub type header of a different type.
+ */
+
+ {
+ /* Build Payload#0 Header */
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes =
+ sizeof(cmd.update_cursor_info.update_cursor_info_data);
+ cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */
+
+ /* Prepare Payload */
+ dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+
+ dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+ pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+ /* Queue the update_cursor_info command */
+ dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+ {
+ /* Build Payload#1 Header */
+ memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+ cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+
+ dc_build_cursor_attribute_update_payload1(
+ &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+ pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+
+ /* Send the combined update_cursor_info commands to the DMU */
+ dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+}
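
dc_send_update_cursor_info_to_dmu above splits the cursor update across two ring commands of the same type: the first is queued with multi_cmd_pending = 1, the second clears the flag, and a single execute/wait flushes both. A toy model of that submission shape; every type and the printf "ring" are stand-ins for the real DMUB service:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct cmd_header {
        uint8_t type;
        uint8_t multi_cmd_pending;
        uint16_t payload_bytes;
    };

    struct cmd {
        struct cmd_header header;
        uint8_t payload[32];
    };

    static void queue_cmd(const struct cmd *c)
    {
        printf("queued type=%u pending=%u\n",
               (unsigned)c->header.type, (unsigned)c->header.multi_cmd_pending);
    }

    static void execute_and_wait(void)
    {
        printf("kicked ring, waited idle\n");
    }

    int main(void)
    {
        struct cmd cmd = { { 0 }, { 0 } };

        /* 1st command: cursor position payload, flag that more follows. */
        cmd.header.type = 42;            /* stand-in for DMUB_CMD__UPDATE_CURSOR_INFO */
        cmd.header.multi_cmd_pending = 1;
        cmd.header.payload_bytes = sizeof(cmd.payload);
        queue_cmd(&cmd);                 /* queue only, no kick yet */

        /* 2nd command: cursor attribute payload, same type, last in chain. */
        memset(cmd.payload, 0, sizeof(cmd.payload));
        cmd.header.multi_cmd_pending = 0;
        queue_cmd(&cmd);
        execute_and_wait();              /* one kick flushes both */
        return 0;
    }
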
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 7e438345b1a8..d34f5563df2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -88,4 +88,5 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
+void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index bf5f9e2773bc..caf0c7af2d0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -138,6 +138,10 @@ struct dc_panel_config {
bool disable_dsc_edp;
unsigned int force_dsc_edp_policy;
} dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
};
/*
* A link contains one or more sinks and their connected status.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 32782ef9ef77..140297c8ff55 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -942,10 +942,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_RET_ERROR_ENGINE_ACQUIRE:
case AUX_RET_ERROR_UNKNOWN:
default:
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: Failure: operation_result=%d",
- (int)operation_result);
goto fail;
}
}
@@ -953,14 +949,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
fail:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
- "dce_aux_transfer_with_retries: FAILURE");
+ "%s: Failure: operation_result=%d",
+ __func__,
+ (int)operation_result);
if (!payload_reply)
payload->reply = NULL;
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
- WPP_BIT_FLAG_DC_ERROR,
- "AUX transaction failed. Result: %d",
- operation_result);
-
return false;
}
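
The dce_aux change above drops the per-case error trace and logs once at the shared fail label, carrying the result code with it. A small sketch of the pattern, with an illustrative enum and fprintf standing in for the DC trace macro:

    #include <stdbool.h>
    #include <stdio.h>

    enum aux_ret { AUX_OK, AUX_ERR_TIMEOUT, AUX_ERR_UNKNOWN };

    /* One error log at the shared exit point instead of one per case:
     * every failure path carries its result code to the same message. */
    static bool do_transfer(enum aux_ret result)
    {
        switch (result) {
        case AUX_OK:
            return true;
        case AUX_ERR_TIMEOUT:
        case AUX_ERR_UNKNOWN:
        default:
            goto fail;
        }
    fail:
        fprintf(stderr, "%s: Failure: operation_result=%d\n", __func__, (int)result);
        return false;
    }

    int main(void)
    {
        return do_transfer(AUX_ERR_TIMEOUT) ? 0 : 1;
    }
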
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 897f412f539e..b9765b3899e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -469,6 +469,7 @@ void dpp1_set_cursor_position(
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, cur_en);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 72521749c01d..11e4c4e46947 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2244,6 +2244,9 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -2254,13 +2257,21 @@ void dcn10_enable_timing_synchronization(
for (i = 0; i < group_size; i++) {
if (grouped_pipes[i]->stream == NULL)
continue;
+
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream->vblank_synchronized = false;
}
- for (i = 1; i < group_size; i++)
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
grouped_pipes[i]->stream_res.tg,
grouped_pipes[0]->stream_res.tg->inst);
+ }
DC_SYNC_INFO("Waiting for trigger\n");
@@ -2268,12 +2279,21 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
- for (i = 1; i < group_size; i++)
+ if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
grouped_pipes[i]->stream_res.tg);
+ }
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -3005,6 +3025,7 @@ void dcn10_prepare_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3027,8 +3048,11 @@ void dcn10_prepare_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3041,6 +3065,7 @@ void dcn10_optimize_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3064,8 +3089,11 @@ void dcn10_optimize_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3344,127 +3372,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool dcn10_dmub_should_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct dc_debug_options *debug)
-{
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dcn10_can_pipe_disable_cursor(pipe_ctx))
- return false;
-
- if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
- return true;
-
- return false;
-}
-
-static void dcn10_dmub_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct hubp *hubp,
- const struct dc_cursor_mi_param *param,
- const struct dc_cursor_position *cur_pos,
- const struct dc_cursor_attributes *cur_attr)
-{
- union dmub_rb_cmd cmd;
- struct dmub_cmd_update_cursor_info_data *update_cursor_info;
- const struct dc_cursor_position *pos;
- const struct dc_cursor_attributes *attr;
- int src_x_offset = 0;
- int src_y_offset = 0;
- int x_hotspot = 0;
- int cursor_height = 0;
- int cursor_width = 0;
- uint32_t cur_en = 0;
- unsigned int panel_inst = 0;
-
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
-
- if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
- return;
- /**
- * if cur_pos == NULL means the caller is from cursor_set_attribute
- * then driver use previous cursor position data
- * if cur_attr == NULL means the caller is from cursor_set_position
- * then driver use previous cursor attribute
- * if cur_pos or cur_attr is not NULL then update it
- */
- if (cur_pos != NULL)
- pos = cur_pos;
- else
- pos = &hubp->curs_pos;
-
- if (cur_attr != NULL)
- attr = cur_attr;
- else
- attr = &hubp->curs_attr;
-
- if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))
- return;
-
- src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
- x_hotspot = pos->x_hotspot;
- cursor_height = (int)attr->height;
- cursor_width = (int)attr->width;
- cur_en = pos->enable ? 1:0;
-
- // Rotated cursor width/height and hotspots tweaks for offset calculation
- if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(cursor_height, cursor_width);
- if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
- }
- } else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
-
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
- }
-
- if (src_x_offset >= (int)param->viewport.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (src_x_offset + cursor_width <= 0)
- cur_en = 0; /* not visible beyond left edge*/
-
- if (src_y_offset >= (int)param->viewport.height)
- cur_en = 0; /* not visible beyond bottom edge*/
-
- if (src_y_offset + cursor_height <= 0)
- cur_en = 0; /* not visible beyond top edge*/
-
- // Cursor bitmaps have different hotspot values
- // There's a possibility that the above logic returns a negative value, so we clamp them to 0
- if (src_x_offset < 0)
- src_x_offset = 0;
- if (src_y_offset < 0)
- src_y_offset = 0;
-
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes =
- sizeof(cmd.update_cursor_info.update_cursor_info_data);
- update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
- update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
- update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
- update_cursor_info->cursor_rect.width = attr->width;
- update_cursor_info->cursor_rect.height = attr->height;
- update_cursor_info->enable = cur_en;
- update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
- update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
- update_cursor_info->panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3699,7 +3606,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
- dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
@@ -3707,25 +3613,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
- struct dc_cursor_mi_param param = { 0 };
-
- /**
- * If enter PSR without cursor attribute update
- * the cursor attribute of dmub_restore_plane
- * are initial value. call dmub to exit PSR and
- * restore plane then update cursor attribute to
- * avoid override with initial value
- */
- if (pipe_ctx->plane_state != NULL) {
- param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
- param.viewport = pipe_ctx->plane_res.scl_data.viewport;
- param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
- param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
- param.rotation = pipe_ctx->plane_state->rotation;
- param.mirror = pipe_ctx->plane_state->horizontal_mirror;
- dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);
- }
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
@@ -3810,28 +3697,14 @@ void dcn10_calc_vupdate_position(
uint32_t *start_line,
uint32_t *end_line)
{
- const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
- int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt.lines_offset;
- int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- int start_position;
-
- if (vline_int_offset_from_vupdate > 0)
- vline_int_offset_from_vupdate--;
- else if (vline_int_offset_from_vupdate < 0)
- vline_int_offset_from_vupdate++;
-
- start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- if (start_position >= 0)
- *start_line = start_position;
+ if (vupdate_pos >= 0)
+ *start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
else
- *start_line = dc_crtc_timing->v_total + start_position - 1;
-
- *end_line = *start_line + 2;
-
- if (*end_line >= dc_crtc_timing->v_total)
- *end_line = 2;
+ *start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
}
static void dcn10_cal_vline_position(
@@ -3840,23 +3713,27 @@ static void dcn10_cal_vline_position(
uint32_t *start_line,
uint32_t *end_line)
{
- switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
- case START_V_UPDATE:
- dcn10_calc_vupdate_position(
- dc,
- pipe_ctx,
- start_line,
- end_line);
- break;
- case START_V_SYNC:
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
+
+ if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
+ if (vline_pos > 0)
+ vline_pos--;
+ else if (vline_pos < 0)
+ vline_pos++;
+
+ vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+ if (vline_pos >= 0)
+ *start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
+ else
+ *start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
// vsync is line 0 so start_line is just the requested line offset
- *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
- *end_line = *start_line + 2;
- break;
- default:
+ *start_line = vline_pos;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else
ASSERT(0);
- break;
- }
}
void dcn10_setup_periodic_interrupt(
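
The rewritten vupdate/vline helpers above fold a possibly negative line offset into [0, v_total) and derive a two-line window that wraps at v_total. The same arithmetic lifted into a standalone sketch (the extra -1 on the negative branch mirrors the hunk as written):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a possibly-negative line offset into [0, v_total), following the
     * arithmetic in the hunk above (note the extra -1 on the negative side). */
    static void calc_vline_window(int pos, int v_total,
                                  uint32_t *start, uint32_t *end)
    {
        if (pos >= 0)
            *start = pos - (pos / v_total) * v_total;        /* == pos % v_total */
        else
            *start = pos + ((-pos / v_total) + 1) * v_total - 1;

        *end = (*start + 2) % v_total;                       /* 2-line window, wraps */
    }

    int main(void)
    {
        uint32_t s, e;

        calc_vline_window(-3, 1125, &s, &e);
        printf("start=%u end=%u\n", s, e);   /* start=1121 end=1123 */
        calc_vline_window(1124, 1125, &s, &e);
        printf("start=%u end=%u\n", s, e);   /* start=1124 end=1 (wrapped) */
        return 0;
    }
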
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ea7739255119..33d780218790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -207,10 +207,7 @@ void optc1_program_timing(
/* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
* OTG_V_TOTAL_MIN are equal to V_TOTAL.
*/
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, v_total);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, v_total);
+ optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
/* v_sync_start = 0, v_sync_end = v_sync_width */
v_sync_end = patched_crtc_timing.v_sync_width;
@@ -649,13 +646,6 @@ uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
void optc1_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t regval = 0;
-
- regval = REG_READ(OTG_CONTROL);
-
- /* otg is not running, do not need to be locked */
- if ((regval & 0x1) == 0x0)
- return;
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
@@ -663,12 +653,10 @@ void optc1_lock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 1);
/* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) {
-
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
- }
}
void optc1_unlock(struct timing_generator *optc)
@@ -679,16 +667,6 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
-bool optc1_is_locked(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t locked;
-
- REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
-
- return (locked == 1);
-}
-
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -941,11 +919,7 @@ void optc1_set_drr(
}
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
-
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
@@ -964,11 +938,7 @@ void optc1_set_drr(
OTG_V_TOTAL_MAX_SEL, 0,
OTG_FORCE_LOCK_ON_EVENT, 0);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, 0);
-
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, 0);
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
}
@@ -1583,11 +1553,11 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = NULL,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.set_test_pattern = optc1_set_test_pattern,
.program_stereo = optc1_program_stereo,
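
The optc1 changes above replace open-coded OTG_V_TOTAL_MIN/MAX register pairs with a single set_vtotal_min_max hook in the timing-generator ops table. A compact sketch of that ops-table indirection, mocking REG_SET with printf:

    #include <stdio.h>

    struct optc;

    /* Ops table: callers program V_TOTAL_MIN/MAX through one hook instead of
     * open-coding two register writes, as in the .set_vtotal_min_max wiring. */
    struct tg_funcs {
        void (*set_vtotal_min_max)(struct optc *optc, int vmin, int vmax);
    };

    struct optc { const struct tg_funcs *funcs; };

    static void optc1_set_vtotal_min_max(struct optc *optc, int vmin, int vmax)
    {
        (void)optc;
        printf("OTG_V_TOTAL_MIN=%d OTG_V_TOTAL_MAX=%d\n", vmin, vmax);  /* REG_SET stand-in */
    }

    static const struct tg_funcs dcn10_tg_funcs = {
        .set_vtotal_min_max = optc1_set_vtotal_min_max,
    };

    int main(void)
    {
        struct optc tg = { .funcs = &dcn10_tg_funcs };
        int v_total = 1125;

        /* Fixed timing: pin min == max == V_TOTAL, exactly as the hunk does. */
        tg.funcs->set_vtotal_min_max(&tg, v_total, v_total);
        return 0;
    }
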
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 6323ca6dc3b3..88ac5f6f4c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -654,7 +654,6 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
-bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 831080b9eb87..56d30baf12df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1336,6 +1336,21 @@ static noinline void dcn10_resource_construct_fp(
}
}
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+ int i;
+
+ if (clks->num_levels == 0)
+ return false;
+
+ for (i = 0; i < clks->num_levels; i++)
+ /* Ensure that the result is sane */
+ if (clks->data[i].clocks_in_khz == 0)
+ return false;
+
+ return true;
+}
+
static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@@ -1345,6 +1360,9 @@ static bool dcn10_resource_construct(
int j;
struct dc_context *ctx = dc->ctx;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
+ bool res;
ctx->dc_bios->regs = &bios_regs;
@@ -1523,15 +1541,53 @@ static bool dcn10_resource_construct(
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
- DC_FP_START();
- if (!dc->debug.disable_pplib_clock_request)
- dcn_bw_update_from_pplib(dc);
+
+ if (!dc->debug.disable_pplib_clock_request) {
+ /*
+ * TODO: This is not the proper way to obtain
+ * fabric_and_dram_bandwidth, should be min(fclk, memclk).
+ */
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&fclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_fclks(dc, &fclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&dcfclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+ }
+
dcn_bw_sync_calcs_and_dml(dc);
if (!dc->debug.disable_pplib_wm_range) {
dc->res_pool = &pool->base;
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_START();
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
+ DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
- DC_FP_END();
{
struct irq_service_init_data init_data;
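
dcn10_resource_construct above now fetches each pplib clock table, sanity-checks it with verify_clock_values, and only then runs the FP bandwidth update inside a narrowed DC_FP_START/END region. A sketch of the validate-then-consume half; the table layout and the consumer are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_LEVELS 8

    struct clock_levels {
        int num_levels;
        int clocks_in_khz[MAX_LEVELS];
    };

    /* Reject empty tables and zeroed entries before feeding them to the
     * bandwidth code, mirroring verify_clock_values() in the hunk above. */
    static bool clocks_are_sane(const struct clock_levels *clks)
    {
        if (clks->num_levels == 0)
            return false;
        for (int i = 0; i < clks->num_levels; i++)
            if (clks->clocks_in_khz[i] == 0)
                return false;
        return true;
    }

    int main(void)
    {
        struct clock_levels fclks = { .num_levels = 2,
                                      .clocks_in_khz = { 400000, 800000 } };

        if (clocks_are_sane(&fclks))
            printf("consume table (dcn_bw_update_from_pplib_fclks stand-in)\n");
        else
            fprintf(stderr, "bad pplib clock table, break to debugger\n");
        return 0;
    }
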
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b1ec0e6f7f58..4996d2810edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -617,6 +617,17 @@ void hubp2_cursor_set_attributes(
CURSOR0_DST_Y_OFFSET, 0,
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
}
void hubp2_dmdata_set_attributes(
@@ -1033,6 +1044,25 @@ void hubp2_cursor_set_position(
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
+ /* Cursor Position Register Config */
+ hubp->pos.cur_ctl.bits.cur_enable = cur_en;
+ hubp->pos.position.bits.x_pos = pos->x;
+ hubp->pos.position.bits.y_pos = pos->y;
+ hubp->pos.hot_spot.bits.x_hot = x_hotspot;
+ hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+ hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
+ /* Cursor Rectangle Cache
+ * Cursor bitmaps have different hotspot values
+ * There's a possibility that the above logic returns a negative value,
+ * so we clamp them to 0
+ */
+ if (src_x_offset < 0)
+ src_x_offset = 0;
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+ /* Save the necessary cursor info: x, y position here; w, h are saved in the attribute func. */
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
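
hubp2_cursor_set_attributes and hubp2_cursor_set_position above shadow each register write into hubp->att, hubp->pos and hubp->cur_rect so a later DMUB payload can be built without register readbacks. A minimal model of that register-shadow pattern, with hypothetical fields and printf standing in for the register write:

    #include <stdint.h>
    #include <stdio.h>

    /* Software cache of cursor state: every register write also lands here,
     * like hubp->att / hubp->pos / hubp->cur_rect in the hunks above. */
    struct cursor_cache { uint32_t x, y, enable; };

    struct hubp {
        struct cursor_cache cur;
    };

    static void reg_write(const char *name, uint32_t v)
    {
        printf("%s <= %u\n", name, v);  /* hardware register stand-in */
    }

    static void hubp_set_cursor_position(struct hubp *hubp,
                                         uint32_t x, uint32_t y, uint32_t en)
    {
        reg_write("CURSOR_POSITION_X", x);   /* hardware programming */
        reg_write("CURSOR_POSITION_Y", y);
        reg_write("CURSOR_ENABLE", en);

        hubp->cur.x = x;                     /* cache for later payload build */
        hubp->cur.y = y;
        hubp->cur.enable = en;
    }

    int main(void)
    {
        struct hubp hubp = { .cur = { 0 } };

        hubp_set_cursor_position(&hubp, 100, 50, 1);
        printf("cached: x=%u y=%u en=%u\n", hubp.cur.x, hubp.cur.y, hubp.cur.enable);
        return 0;
    }
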
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1d271fe9e64..d732b6f031a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1862,24 +1862,6 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpcc_pipe;
-
- if (pipe->vtp_locked) {
- dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp);
- pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true);
- pipe->vtp_locked = false;
-
- for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* If an active, non-phantom pipe is being transitioned into a phantom
@@ -2018,6 +2000,10 @@ void dcn20_optimize_bandwidth(
context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ /* increase compbuf size */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2033,9 +2019,6 @@ void dcn20_optimize_bandwidth(
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
}
}
- /* increase compbuf size */
- if (hubbub->funcs->program_compbuf_size)
- hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
}
bool dcn20_update_bandwidth(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 0340fdd3f5fb..a08c335b7383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -529,6 +529,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5752271f22df..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,15 +67,9 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active, prefetch_done;
+ uint32_t riommu_active;
int i;
- REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
-
- if (prefetch_done) {
- hubbub->riommu_active = true;
- return;
- }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 7cb35bb1c0f1..887081472c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -657,7 +657,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .optimize_edp_link_rate = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -677,6 +676,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
@@ -1367,6 +1372,11 @@ static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
+static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
#define CTX ctx
#define REG(reg_name) \
@@ -1408,6 +1418,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
+ .get_panel_config_defaults = dcn21_get_panel_config_defaults,
};
static bool dcn21_resource_construct(
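
The resource-pool diffs in this series move the eDP ILR knob out of dc_debug_options and into per-panel config, with each pool exporting its defaults through a get_panel_config_defaults hook, as added for dcn21 above. A sketch of that defaults-export shape, reduced to the single field involved:

    #include <stdbool.h>
    #include <stdio.h>

    /* eDP ILR now lives in per-panel config rather than global debug options. */
    struct dc_panel_config {
        struct { bool optimize_edp_link_rate; } ilr;
    };

    struct resource_funcs {
        void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
    };

    static const struct dc_panel_config dcn21_panel_defaults = {
        .ilr = { .optimize_edp_link_rate = true },
    };

    static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
    {
        *panel_config = dcn21_panel_defaults;   /* struct copy of the const table */
    }

    static const struct resource_funcs dcn21_res_funcs = {
        .get_panel_config_defaults = dcn21_get_panel_config_defaults,
    };

    int main(void)
    {
        struct dc_panel_config cfg;

        dcn21_res_funcs.get_panel_config_defaults(&cfg);
        printf("optimize_edp_link_rate=%d\n", (int)cfg.ilr.optimize_edp_link_rate);
        return 0;
    }
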
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 4a668d6563df..e5b7ef7422b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -372,6 +372,10 @@ void dpp3_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
+
+ dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
+ dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en;
+ dpp_base->att.cur0_ctl.bits.mode = color_format;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 1782b9c26cf4..892d3c4d01a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -319,13 +319,13 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
@@ -366,4 +366,3 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 3a3b2ac791c7..020f512e9690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1655,6 +1655,9 @@ noinline bool dcn30_internal_validate_bw(
if (!pipes)
return false;
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ context->bw_ctx.dml.vba.VoltageLevel = 0;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
@@ -1873,6 +1876,7 @@ noinline bool dcn30_internal_validate_bw(
if (repopulate_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 559e563d5bc1..f04595b750ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -852,7 +852,7 @@ static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
vmid->masks = &vmid_masks;
}
- hubbub3->num_vmid = res_cap_dcn301.num_vmid;
+ hubbub3->num_vmid = res_cap_dcn301.num_vmid;
return &hubbub3->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 52fb2bf3d578..814f401db3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -197,7 +197,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
uint32_t h_back_porch;
uint32_t h_width;
uint32_t v_height;
- unsigned long long v_freq;
+ uint64_t v_freq;
uint8_t misc0 = 0;
uint8_t misc1 = 0;
uint8_t hsp;
@@ -360,7 +360,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
- v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+ v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100;
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
*
@@ -436,32 +436,28 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
{
struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
uint32_t dmdata_packet_enabled = 0;
- bool sdp_stream_enable = false;
- if (info_frame->vsc.valid) {
+ if (info_frame->vsc.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
0, /* packetIndex */
&info_frame->vsc,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->spd.valid) {
+
+ if (info_frame->spd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
2, /* packetIndex */
&info_frame->spd,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->hdrsmd.valid) {
+
+ if (info_frame->hdrsmd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
3, /* packetIndex */
&info_frame->hdrsmd,
true);
- sdp_stream_enable = true;
- }
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
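
The v_freq change in the dcn31 HPO stream encoder above widens pix_clk_100hz to 64 bits before the multiply; a plain 32-bit multiply wraps for pixel clocks above roughly 4.29 GHz. A tiny demonstration, with an illustrative clock value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative 8K@120-class pixel clock in 100Hz units (~4.8 GHz). */
        uint32_t pix_clk_100hz = 48000000;

        uint64_t wrong = pix_clk_100hz * 100;             /* 32-bit multiply, wraps */
        uint64_t right = (uint64_t)pix_clk_100hz * 100;   /* widen first, as the hunk does */

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }
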
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 2f7404a97479..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -201,7 +201,6 @@ void optc31_set_drr(
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
-
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
@@ -260,7 +259,6 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8c1a6fb36306..fddc21a5a04c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -888,9 +888,8 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .optimize_edp_link_rate = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -911,6 +910,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1803,6 +1808,11 @@ validate_out:
return out;
}
+static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1829,6 +1839,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn31_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 0d2ffb692957..7e773bf7b895 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -262,7 +262,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
-void enc314_stream_encoder_dp_blank(
+static void enc314_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 24ec71cbd3e3..d0ad72caead2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -881,7 +881,8 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_z10 = true, /*hw not support it*/
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
@@ -914,7 +915,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.seamless_boot_odm_combine = true
};
@@ -936,6 +936,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1675,6 +1681,11 @@ static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_END();
}
+static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1697,6 +1708,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn314_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index eebb42c9ddd6..58746c437554 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.psr_power_use_phy_fsm = 0,
};
@@ -907,6 +906,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1708,6 +1713,11 @@ static int dcn315_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1721,7 +1731,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
- .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1734,6 +1744,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn315_get_panel_config_defaults,
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index f4b52a35ad84..6b40a11ac83a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -906,6 +905,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1710,6 +1715,11 @@ static int dcn316_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1736,6 +1746,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn316_get_panel_config_defaults,
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index fdae6aa89908..076969d928af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -150,12 +150,6 @@ static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
}
-void enc32_set_dig_output_mode(struct link_encoder *enc, uint8_t pix_per_container)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container);
-}
-
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -186,7 +180,6 @@ static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn32_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 749a1e8cb811..bbcfce06bec0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,8 +53,4 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
-void enc32_set_dig_output_mode(
- struct link_encoder *enc,
- uint8_t pix_per_container);
-
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 0e9dce414641..d19fc93dbc75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -243,6 +243,39 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
+{
+ /* Math borrowed from the function of the same name in inc/resource;
+ * checks whether the horizontal timing is divisible by 2.
+ */
+
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (timing) {
+ h_blank_start = timing->h_total - timing->h_front_porch;
+ h_blank_end = h_blank_start - timing->h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (timing->h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (timing->h_sync_width % 2 == 0);
+ }
+ return divisible;
+}
+
+static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
+{
+ /* Should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy for DP encoders */
+ return is_h_timing_divisible_by_2(timing) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy;
+}
+
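
A worked example of the divisibility check above, using CEA-861 1080p60 numbers (h_total 2200, front porch 88, active 1920, sync width 44) purely as sample inputs; the struct here is a trimmed stand-in for dc_crtc_timing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in for dc_crtc_timing. */
struct crtc_timing {
        uint16_t h_total;
        uint16_t h_front_porch;
        uint16_t h_addressable;
        uint16_t h_sync_width;
};

static bool h_timing_divisible_by_2(const struct crtc_timing *t)
{
        /* HTOTAL, Hblank start/end and Hsync width must all be even
         * (Hsync start is always 0), same as the driver check above.
         */
        uint16_t h_blank_start = t->h_total - t->h_front_porch;
        uint16_t h_blank_end = h_blank_start - t->h_addressable;

        return (t->h_total % 2 == 0) && (h_blank_start % 2 == 0) &&
               (h_blank_end % 2 == 0) && (t->h_sync_width % 2 == 0);
}

int main(void)
{
        /* 1080p60 sample: blank start = 2200 - 88 = 2112, blank end = 192 */
        struct crtc_timing t = { 2200, 88, 1920, 44 };

        printf("divisible by 2: %s\n", h_timing_divisible_by_2(&t) ? "yes" : "no");
        return 0;
}
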
static void enc32_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -259,7 +292,7 @@ static void enc32_stream_encoder_dp_unblank(
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
/* this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
}
@@ -355,7 +388,7 @@ static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode);
+ REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}
/* This function reads DSC-related register fields to be logged later in dcn10_log_hw_state
@@ -378,24 +411,6 @@ static void enc32_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
-static void enc32_stream_encoder_reset_fifo(struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t fifo_enabled;
-
- REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &fifo_enabled);
-
- if (fifo_enabled == 0) {
- /* reset DIG resync FIFO */
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
- /* TODO: fix timeout when wait for DIG_FIFO_RESET_DONE */
- //REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 1, 100);
- udelay(1);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 1, 100);
- }
-}
-
static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -425,8 +440,6 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc32_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 250d9a341cf6..ecd041a446d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -71,7 +71,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@@ -93,7 +95,7 @@
SRI(DIG_FIFO_CTRL0, DIG, id)
-#define SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)\
+#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\
@@ -106,6 +108,7 @@
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
@@ -244,15 +247,6 @@
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh),\
- SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh)
-#else
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)
-#endif
-
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
index 9db1323e1933..176b1537d2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -47,6 +47,7 @@
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index f6d3da475835..9fbb72369c10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -936,6 +936,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.program_watermarks = hubbub32_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub32_init_watermarks,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 2038cbda33f7..ac1c6458dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -79,6 +79,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp)
uint32_t reg_val;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* For phantom pipe enable, disable GSL */
+ REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, 0);
REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, 1);
reg_val = REG_READ(DCHUBP_CNTL);
if (reg_val) {
@@ -179,12 +181,12 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
- .hubp_set_flip_int = hubp1_set_flip_int
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index a750343ca521..cf5bd9713f54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -206,8 +206,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- uint8_t i;
- int j;
+ int i, j;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
uint32_t cursor_size = 0;
@@ -630,10 +629,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
params = &dpp_base->degamma_params;
}
- result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+ dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
- if (result &&
- pipe_ctx->stream_res.opp &&
+ if (pipe_ctx->stream_res.opp &&
pipe_ctx->stream_res.opp->ctx &&
hws->funcs.set_mcm_luts)
result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state);
@@ -991,6 +989,10 @@ void dcn32_init_hw(struct dc *dc)
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
}
+
+ /* Enable support for ODM and windowed MPO if policy flag is set */
+ if (dc->debug.enable_single_display_2to1_odm_policy)
+ dc->config.enable_windowed_mpo_odm = true;
}
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
@@ -1145,23 +1147,25 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
true);
}
- // Don't program pixel clock after link is already enabled
-/* if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings)) {
- BREAK_TO_DEBUGGER();
- }*/
+ if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
- if (pipe_ctx->stream_res.dsc)
update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
+
+ /* If the pipe is no longer used for ODM, the DSC for that pipe must be disconnected */
+ if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
+ current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
+ struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
+ /* disconnect DSC block from stream */
+ dsc->funcs->dsc_disconnect(dsc);
+ }
+ }
}
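
The DSC teardown added above compares the new pipe topology against the currently committed one: when the committed state still has a next_odm_pipe carrying a DSC but the new state does not, that DSC has to be disconnected from the stream. A minimal sketch of that comparison, with the structures trimmed to the fields used and dsc_disconnect() standing in for the driver's dsc->funcs->dsc_disconnect():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dsc {
        bool connected;
};

/* Only the fields used by the check are modeled here. */
struct pipe_ctx {
        struct pipe_ctx *next_odm_pipe;
        struct dsc *dsc;
};

/* Stand-in for dsc->funcs->dsc_disconnect(dsc). */
static void dsc_disconnect(struct dsc *dsc)
{
        dsc->connected = false;
}

/* Disconnect the DSC of an ODM pipe the new state no longer uses. */
static void release_stale_odm_dsc(const struct pipe_ctx *new_pipe,
                                  const struct pipe_ctx *committed_pipe)
{
        if (!new_pipe->next_odm_pipe && committed_pipe->next_odm_pipe &&
            committed_pipe->next_odm_pipe->dsc)
                dsc_disconnect(committed_pipe->next_odm_pipe->dsc);
}

int main(void)
{
        struct dsc dsc = { .connected = true };
        struct pipe_ctx odm_tail = { .next_odm_pipe = NULL, .dsc = &dsc };
        struct pipe_ctx committed = { .next_odm_pipe = &odm_tail, .dsc = NULL };
        struct pipe_ctx updated = { .next_odm_pipe = NULL, .dsc = NULL };

        release_stale_odm_dsc(&updated, &committed);
        printf("dsc still connected: %d\n", dsc.connected); /* prints 0 */
        return 0;
}
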
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
// For phantom pipes, use the same programming as the main pipes
@@ -1189,7 +1193,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -1226,7 +1230,6 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
uint32_t pix_per_cycle = 1;
params.opp_cnt = 1;
@@ -1245,7 +1248,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
pipe_ctx->stream_res.tg->inst);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) {
params.timing.pix_clk_100hz /= 2;
pix_per_cycle = 2;
}
@@ -1262,6 +1265,9 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
@@ -1394,7 +1400,7 @@ bool dcn32_dsc_pg_status(
break;
}
- return pwr_status == 0 ? true : false;
+ return pwr_status == 0;
}
void dcn32_update_dsc_pg(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index ec3989d37086..2b33eeb213e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -151,7 +151,7 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
- 1, 100000);
+ 1, 150000);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 05de97ea855f..a88dd7b3d1c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1680,6 +1680,8 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+ phantom_plane->is_phantom = true;
+
dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
@@ -1749,6 +1751,10 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
pipe->stream->mall_stream_config.type = SUBVP_NONE;
pipe->stream->mall_stream_config.paired_stream = NULL;
}
+
+ if (pipe->plane_state) {
+ pipe->plane_state->is_phantom = false;
+ }
}
return removed_pipe;
}
@@ -1798,14 +1804,39 @@ bool dcn32_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ struct mall_temp_config mall_temp_config;
+
+ /* To handle FreeSync properly, reset the FreeSync DML parameters
+ * to their default state for the first stage of validation
+ */
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+
DC_LOGGER_INIT(dc->ctx->logger);
+ /* For fast validation, there are situations where a shallow copy
+ * of the dc->current_state is created for the validation. In this case
+ * we want to save and restore the mall config because we always
+ * tear down subvp at the beginning of validation (and don't attempt
+ * to add it back if it's fast validation). If we don't restore the
+ * subvp config in cases of fast validation + shallow copy of the
+ * dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ */
+ if (fast_validate) {
+ memset(&mall_temp_config, 0, sizeof(mall_temp_config));
+ dcn32_save_mall_state(dc, context, &mall_temp_config);
+ }
+
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
+ if (fast_validate)
+ dcn32_restore_mall_state(dc, context, &mall_temp_config);
+
if (pipe_cnt == 0)
goto validate_out;
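
The fast-validate path above brackets the bandwidth validation with a save/restore pair so a shallow-copied current state is not left with SubVP half torn down. A compact sketch of the same guard pattern, assuming illustrative types (a per-pipe mall_config array standing in for the stream/plane fields the driver actually caches):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_PIPES 6

enum mall_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct state {
        enum mall_type mall_config[MAX_PIPES];
};

struct mall_temp_config {
        enum mall_type mall_config[MAX_PIPES];
};

/* Validation always tears SubVP down as a side effect; the full path
 * would try to re-add it, the fast path does not.
 */
static bool internal_validate(struct state *ctx, bool fast_validate)
{
        (void)fast_validate;
        for (int i = 0; i < MAX_PIPES; i++)
                ctx->mall_config[i] = SUBVP_NONE;
        return true;
}

static bool validate_bandwidth(struct state *ctx, bool fast_validate)
{
        struct mall_temp_config tmp;
        bool out;

        if (fast_validate) {
                memset(&tmp, 0, sizeof(tmp));
                memcpy(tmp.mall_config, ctx->mall_config,
                       sizeof(tmp.mall_config));              /* save */
        }

        out = internal_validate(ctx, fast_validate);

        if (fast_validate)
                memcpy(ctx->mall_config, tmp.mall_config,
                       sizeof(tmp.mall_config));              /* restore */

        return out;
}

int main(void)
{
        struct state ctx = { { SUBVP_MAIN, SUBVP_PHANTOM } };

        validate_bandwidth(&ctx, true);
        printf("pipe 0: %d\n", ctx.mall_config[0]); /* 1 == SUBVP_MAIN, preserved */
        return 0;
}
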
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 55945cca2260..f76120e67c16 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -45,6 +45,17 @@
extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
+/* Temp struct used to save and restore MALL config
+ * during validation.
+ *
+ * TODO: Move MALL config into dc_state instead of stream struct
+ * to avoid needing to save/restore.
+ */
+struct mall_temp_config {
+ struct mall_stream_config mall_stream_config[MAX_PIPES];
+ bool is_phantom_plane[MAX_PIPES];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -108,6 +119,8 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -120,6 +133,15 @@ void dcn32_determine_det_override(struct dc *dc,
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
+
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index a2a70a1572b7..d51d0c40ae5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -233,6 +233,23 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}
+
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
+ return true;
+ }
+ return false;
+}
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -363,3 +380,74 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
} else
dcn32_determine_det_override(dc, context, pipes);
}
+
+/**
+ * dcn32_save_mall_state - Save MALL (SubVP) state for fast validation cases
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @temp_config: [out] struct used to cache the existing MALL state
+ *
+ * This function saves the MALL (SubVP) state for fast validation cases. For
+ * fast validation, there are situations where a shallow copy of the
+ * dc->current_state is created for the validation. In this case we want to
+ * save and restore the mall config because we always tear down subvp at the
+ * beginning of validation (and don't attempt to add it back if it's fast
+ * validation). If we don't restore the subvp config in cases of fast
+ * validation + shallow copy of the dc->current_state, the dc->current_state
+ * will have a partially removed subvp state when we did not intend to
+ * remove it.
+ *
+ * NOTE: This function ONLY works if the streams are not moved to a different
+ * pipe in the validation. We don't expect this to happen in fast_validation=1
+ * cases.
+ */
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
+
+ if (pipe->plane_state)
+ temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
+ }
+}
+
+/**
+ * dcn32_restore_mall_state - Restore MALL (SubVP) state for fast validation
+ * cases
+ * @dc: Current DC state
+ * @context: New DC state to be programmed, restore MALL state into here
+ * @temp_config: struct that has the cached MALL state
+ *
+ * Restore the MALL state based on the previously saved state from
+ * dcn32_save_mall_state.
+ */
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
+
+ if (pipe->plane_state)
+ pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 49682a31ecbd..fa9b6603cfd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -91,7 +91,6 @@ static const struct link_encoder_funcs dcn321_link_enc_funcs = {
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn321_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index aed0f689cbbf..61087f2385a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -94,8 +94,6 @@
#include "dcn20/dcn20_vmid.h"
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
enum dcn321_clk_src_array_id {
DCN321_CLK_SRC_PLL0,
@@ -1606,7 +1604,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1656,7 +1654,7 @@ static bool dcn321_resource_construct(
#undef REG_STRUCT
#define REG_STRUCT dccg_regs
- dccg_regs_init();
+ dccg_regs_init();
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index cb81ed2fbd53..d70838edba80 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -34,7 +34,7 @@ dml_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+ifneq ($(call gcc-min-version, 70100),y)
IS_OLD_GCC = 1
endif
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index d46adc849d2a..e73f089c84bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1444,81 +1444,67 @@ unsigned int dcn_find_dcfclk_suits_all(
return dcf_clk;
}
-static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks)
{
- int i;
-
- if (clks->num_levels == 0)
- return false;
-
- for (i = 0; i < clks->num_levels; i++)
- /* Ensure that the result is sane */
- if (clks->data[i].clocks_in_khz == 0)
- return false;
+ unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
- return true;
+ ASSERT(fclks->num_levels);
+
+ vmin0p65_idx = 0;
+ vmid0p72_idx = fclks->num_levels -
+ (fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+ vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+ vmax0p9_idx = fclks->num_levels - 1;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmid0p72_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vnom0p8_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmax0p9_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
}
-void dcn_bw_update_from_pplib(struct dc *dc)
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks)
{
- struct dc_context *ctx = dc->ctx;
- struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
- bool res;
- unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
-
- /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
-
- if (res)
- res = verify_clock_values(&fclks);
-
- if (res) {
- ASSERT(fclks.num_levels);
-
- vmin0p65_idx = 0;
- vmid0p72_idx = fclks.num_levels -
- (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
- vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
- vmax0p9_idx = fclks.num_levels - 1;
-
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
- 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
-
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
-
- if (res)
- res = verify_clock_values(&dcfclks);
+ if (dcfclks->num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = dcfclks->data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = dcfclks->data[dcfclks->num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = dcfclks->data[dcfclks->num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = dcfclks->data[dcfclks->num_levels - 1].clocks_in_khz / 1000.0;
+ }
+}
- if (res && dcfclks.num_levels >= 3) {
- dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz)
+{
+ *min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ *min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ *socclk_khz = dc->dcn_soc->socclk * 1000;
}
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz)
{
struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
- int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
if (dc->res_pool->pp_smu)
@@ -1526,10 +1512,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
- socclk_khz = dc->dcn_soc->socclk * 1000;
-
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
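
With the split above, fetching the clock levels from PPLib, sanity-checking them, and folding them into the SoC tables are now separate steps that the caller sequences itself. A hedged sketch of the resulting caller-side flow; verify_clock_values() mirrors the check the old monolithic function applied internally, and the two update helpers are stand-ins for dcn_bw_update_from_pplib_fclks()/_dcfclks():

#include <stdbool.h>
#include <stdio.h>

#define MAX_LEVELS 8

struct clock_level {
        unsigned int clocks_in_khz;
};

struct clock_levels {
        unsigned int num_levels;
        struct clock_level data[MAX_LEVELS];
};

/* Same sanity check the old monolithic function applied internally. */
static bool verify_clock_values(const struct clock_levels *clks)
{
        if (clks->num_levels == 0)
                return false;
        for (unsigned int i = 0; i < clks->num_levels; i++)
                if (clks->data[i].clocks_in_khz == 0)
                        return false;
        return true;
}

/* Stand-ins for dcn_bw_update_from_pplib_fclks()/_dcfclks(). */
static void update_from_fclks(const struct clock_levels *fclks)
{
        printf("fclk levels: %u\n", fclks->num_levels);
}

static void update_from_dcfclks(const struct clock_levels *dcfclks)
{
        printf("dcfclk levels: %u\n", dcfclks->num_levels);
}

int main(void)
{
        /* Caller-side sequence: fetch (stubbed here), verify, then update. */
        struct clock_levels fclks = { 2, { { 800000 }, { 1200000 } } };
        struct clock_levels dcfclks = { 3, { { 300000 }, { 600000 }, { 900000 } } };

        if (verify_clock_values(&fclks))
                update_from_fclks(&fclks);
        if (verify_clock_values(&dcfclks))
                update_from_dcfclks(&dcfclks);
        return 0;
}
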
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index b6e99eefe869..7dd0845d1bd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -292,6 +292,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
.num_chans = 4,
+ .dummy_pstate_latency_us = 10.0
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -459,13 +460,30 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
}
}
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
+ /* For 315 pstate change is only supported if possible in vactive */
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[context->bw_ctx.dml.vba.VoltageLevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_vactive)
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+ else
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
+ }
+}
+
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_dpp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -486,72 +504,6 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-#if 0 // TODO
- /* Set B:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set C:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Set D:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
/* Set A:
* All clocks min required
*
@@ -568,16 +520,17 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* TODO: remove: */
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- /* end remove*/
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_dpp_count++;
+
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -594,6 +547,9 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ /* For 31x APUs, pstate change is only supported if possible in vactive or if there are no active dpps */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
}
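
The clamp added above reduces the reported P-state support to one rule: either DML says the DRAM clock change fits in VACTIVE, or no planes are active at all. A restatement of that rule as a standalone predicate (enum and names are illustrative, not the DML types):

#include <stdbool.h>
#include <stdio.h>

enum dram_clock_change {
        DM_DRAM_CLOCK_CHANGE_VACTIVE,
        DM_DRAM_CLOCK_CHANGE_VBLANK,
        DM_DRAM_CLOCK_CHANGE_UNSUPPORTED,
};

/* 31x APU rule: honor P-state switching only when it fits in VACTIVE,
 * or trivially when no planes (DPPs) are active.
 */
static bool p_state_change_support(enum dram_clock_change support,
                                   int active_dpp_count)
{
        return support == DM_DRAM_CLOCK_CHANGE_VACTIVE || !active_dpp_count;
}

int main(void)
{
        printf("%d\n", p_state_change_support(DM_DRAM_CLOCK_CHANGE_VBLANK, 0)); /* 1 */
        printf("%d\n", p_state_change_support(DM_DRAM_CLOCK_CHANGE_VBLANK, 2)); /* 0 */
        return 0;
}
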
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -739,7 +695,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
else
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 4372f17b55d4..fd58b2561ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -35,6 +35,7 @@ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 8dfe639b6508..b612edb14417 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -43,6 +43,8 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN31_MAX_DSC_IMAGE_WIDTH 5184
#define DCN31_MAX_FMT_420_BUFFER_WIDTH 4096
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_15_MAX_DET_SIZE 384
// For DML-C changes that haven't been propagated to VBA yet
//#define __DML_VBA_ALLOW_DELTA__
@@ -3775,6 +3777,17 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+{
+ int i, total_pipes = 0;
+ for (i = 0; i < NumberOfActivePlanes; i++)
+ total_pipes += NoOfDPPThisState[i];
+ *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+ if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
+ *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+}
+
+
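
A worked example of the DET sizing above, assuming for illustration a 1024 KB config return buffer (not necessarily the real DCN3.15 value): the minimum compressed buffer is carved off, the remainder is split evenly across pipes in 64 KB granularity, and the result is capped at 384 KB:

#include <stdio.h>

#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_15_MAX_DET_SIZE 384

/* Carve off the minimum compressed buffer, split the remainder evenly
 * across pipes in 64 KB granularity, and cap at the maximum DET size.
 */
static unsigned int det_size_kb(unsigned int config_return_kb, int total_pipes)
{
        unsigned int det = ((config_return_kb - DCN3_15_MIN_COMPBUF_SIZE_KB) /
                            64 / total_pipes) * 64;

        return det > DCN3_15_MAX_DET_SIZE ? DCN3_15_MAX_DET_SIZE : det;
}

int main(void)
{
        /* (1024 - 128) = 896 KB; 896/64 = 14 segments of 64 KB */
        printf("4 pipes: %u KB\n", det_size_kb(1024, 4)); /* 14/4 = 3 -> 192 KB */
        printf("2 pipes: %u KB\n", det_size_kb(1024, 2)); /* 14/2 = 7 -> 448, capped to 384 */
        return 0;
}
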
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
@@ -4533,6 +4546,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
+ if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
+ PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 0571700f53f9..819de0f11012 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -243,7 +243,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
-/**
+/*
* Finds dummy_latency_index when MCLK switching using firmware based
* vblank stretch is enabled. This function will iterate through the
* table of dummy pstate latencies until the lowest value that allows
@@ -290,15 +290,14 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
- *
- * This function must be called AFTER the phantom pipes are added to context
- * and run through DML (so that the DLG params for the phantom pipes can be
- * populated), and BEFORE we program the timing for the phantom pipes.
- *
* @dc: [in] current dc state
* @context: [in] new dc state
* @pipes: [in] DML pipe params array
* @pipe_cnt: [in] DML pipe count
+ *
+ * This function must be called AFTER the phantom pipes are added to context
+ * and run through DML (so that the DLG params for the phantom pipes can be
+ * populated), and BEFORE we program the timing for the phantom pipes.
*/
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
struct dc_state *context,
@@ -331,8 +330,9 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
/**
- * *******************************************************************************************
- * dcn32_predict_pipe_split: Predict if pipe split will occur for a given DML pipe
+ * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
+ * @context: [in] New DC state to be programmed
+ * @pipe_e2e: [in] DML pipe end to end context
*
* This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
* ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
@@ -343,12 +343,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
* - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
* MPC is required
*
- * @param [in]: context: New DC state to be programmed
- * @param [in]: pipe_e2e: DML pipe end to end context
- *
- * @return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
- *
- * *******************************************************************************************
+ * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
*/
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e)
@@ -504,7 +499,14 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
}
/**
- * dcn32_set_phantom_stream_timing: Set timing params for the phantom stream
+ * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
+ * @dc: current dc state
+ * @context: new dc state
+ * @ref_pipe: Main pipe for the phantom stream
+ * @phantom_stream: target phantom stream state
+ * @pipes: DML pipe params
+ * @pipe_cnt: number of DML pipes
+ * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*
* Set timing params of the phantom stream based on calculated output from DML.
* This function first gets the DML pipe index using the DC pipe index, then
@@ -517,13 +519,6 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
* that separately.
*
* - Set phantom backporch = vstartup of main pipe
- *
- * @dc: current dc state
- * @context: new dc state
- * @ref_pipe: Main pipe for the phantom stream
- * @pipes: DML pipe params
- * @pipe_cnt: number of DML pipes
- * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*/
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
@@ -592,16 +587,14 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
}
/**
- * dcn32_get_num_free_pipes: Calculate number of free pipes
+ * dcn32_get_num_free_pipes - Calculate number of free pipes
+ * @dc: current dc state
+ * @context: new dc state
*
* This function assumes that a "used" pipe is a pipe that has
* both a stream and a plane assigned to it.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * Number of free pipes available in the context
+ * Return: Number of free pipes available in the context
*/
static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
{
@@ -625,7 +618,10 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
}
/**
- * dcn32_assign_subvp_pipe: Function to decide which pipe will use Sub-VP.
+ * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
+ * @dc: current dc state
+ * @context: new dc state
+ * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
*
* We enter this function if we are Sub-VP capable (i.e. enough pipes available)
* and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
@@ -639,12 +635,7 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
* for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
* support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
*
- * @param dc: current dc state
- * @param context: new dc state
- * @param index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
- *
- * Return:
- * True if a valid pipe assignment was found for Sub-VP. Otherwise false.
+ * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
*/
static bool dcn32_assign_subvp_pipe(struct dc *dc,
struct dc_state *context,
@@ -711,7 +702,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
}
/**
- * dcn32_enough_pipes_for_subvp: Function to check if there are "enough" pipes for SubVP.
+ * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
+ * @dc: current dc state
+ * @context: new dc state
*
* This function returns true if there are enough free pipes
* to create the required phantom pipes for any given stream
@@ -723,9 +716,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* pipe which can be used as the phantom pipe for the non pipe
* split pipe.
*
- * @dc: current dc state
- * @context: new dc state
- *
* Return:
* True if there are enough free pipes to assign phantom pipes to at least one
* stream that does not already have phantom pipes assigned. Otherwise false.
@@ -764,7 +754,9 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
}
/**
- * subvp_subvp_schedulable: Determine if SubVP + SubVP config is schedulable
+ * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Find longest microschedule length (in us) between the two SubVP pipes
@@ -772,11 +764,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
* pipes still allows for the maximum microschedule to fit in the active
* region for both pipes.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + SubVP config is schedulable, false otherwise
+ * Return: True if the SubVP + SubVP config is schedulable, false otherwise
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -836,7 +824,10 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_drr_schedulable: Determine if SubVP + DRR config is schedulable
+ * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
+ * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
@@ -845,12 +836,7 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* 3.If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
* then report the configuration as supported
*
- * @dc: current dc state
- * @context: new dc state
- * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
- *
- * Return:
- * bool - True if the SubVP + DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + DRR config is schedulable, false otherwise
*/
static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
{
@@ -914,7 +900,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
/**
- * subvp_vblank_schedulable: Determine if SubVP + VBLANK config is schedulable
+ * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
@@ -922,11 +910,7 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
* then report the configuration as supported
* 3. If the VBLANK display is DRR, then take the DRR static schedulability path
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
*/
static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -1003,20 +987,18 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_validate_static_schedulability: Check which SubVP case is calculated and handle
- * static analysis based on the case.
+ * subvp_validate_static_schedulability - Check which SubVP case is calculated
+ * and handle static analysis based on the case.
+ * @dc: current dc state
+ * @context: new dc state
+ * @vlevel: Voltage level calculated by DML
*
* Three cases:
* 1. SubVP + SubVP
* 2. SubVP + VBLANK (DRR checked internally)
* 3. SubVP + VACTIVE (currently unsupported)
*
- * @dc: current dc state
- * @context: new dc state
- * @vlevel: Voltage level calculated by DML
- *
- * Return:
- * bool - True if statically schedulable, false otherwise
+ * Return: True if statically schedulable, false otherwise
*/
static bool subvp_validate_static_schedulability(struct dc *dc,
struct dc_state *context,
@@ -1115,7 +1097,8 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
- !dcn32_mpo_in_use(context) && (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
+ (*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
@@ -1597,6 +1580,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
/*MPC split rules will handle this case*/
pipe->bottom_pipe->top_pipe = NULL;
} else {
+ /* When merging ODM pipes, the bottom MPC pipe must now point to
+ * the previous ODM pipe and its associated stream assets
+ */
if (pipe->prev_odm_pipe->bottom_pipe) {
/* 3 plane MPO*/
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
@@ -1606,6 +1592,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
}
+
+ memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
}
}
@@ -1781,6 +1769,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
unsigned int dummy_latency_index = 0;
@@ -1816,7 +1805,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] !=
dm_dram_clock_change_unsupported;
}
@@ -1902,6 +1891,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
+ }
+
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts_margin = 160;
@@ -2275,7 +2268,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
return 0;
}
-/**
+/*
* dcn32_update_bw_bounding_box
*
* This would override some dcn3_2 ip_or_soc initial parameters hardcoded from
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 75be1e1ce543..5b91660a6496 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -733,6 +733,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.FCLKChangeLatency, v->UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, 0, sizeof(DmlPipe));
+
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dppclk = mode_lib->vba.DPPCLK[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dispclk = mode_lib->vba.DISPCLK;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
@@ -2252,9 +2254,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
- || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0
- || mode_lib->vba.DSCInputBitPerComponent[k] >
- mode_lib->vba.MaximumDSCBitsPerComponent)) {
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)
+ || mode_lib->vba.DSCInputBitPerComponent[k] > mode_lib->vba.MaximumDSCBitsPerComponent) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
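The hunk above fixes an operator-precedence bug: the "> MaximumDSCBitsPerComponent" test used to sit inside the negated OR chain, so a BPC that matched 8/10/12 was accepted even when it exceeded the ASIC maximum. A minimal standalone sketch of the corrected predicate (plain C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Unsupported unless the BPC is exactly 8, 10 or 12, and also
 * unsupported whenever it exceeds the per-ASIC maximum. */
static bool dsc_input_bpc_unsupported(double bpc, double max_bpc)
{
	return !(bpc == 12.0 || bpc == 10.0 || bpc == 8.0) ||
	       bpc > max_bpc;
}

int main(void)
{
	/* 12 bpc on a part capped at 10 bpc: the old negated-OR form
	 * reported "supported" here; the fixed form rejects it. */
	printf("%d\n", dsc_input_bpc_unsupported(12.0, 10.0)); /* 1 */
	printf("%d\n", dsc_input_bpc_unsupported(10.0, 12.0)); /* 0 */
	return 0;
}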
@@ -2330,16 +2331,15 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
- if (mode_lib->vba.OutputMultistreamId[k] == j && mode_lib->vba.OutputMultistreamEn[k]
+ if (mode_lib->vba.OutputMultistreamId[k] == j
&& mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
}
}
if ((mode_lib->vba.Output[k] == dm_edp || mode_lib->vba.Output[k] == dm_hdmi)) {
- if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.OutputMultistreamEn[k])
+ if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
-
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == j)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
@@ -2478,8 +2478,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]);
}
- m = 0;
-
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActiveSurfaces - 1; j++) {
@@ -2856,8 +2854,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- m = 0;
-
//Calculate Return BW
for (i = 0; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
@@ -3618,11 +3614,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ModeIsSupported = mode_lib->vba.ModeSupport[i][0] == true
|| mode_lib->vba.ModeSupport[i][1] == true;
- if (mode_lib->vba.ModeSupport[i][0] == true) {
+ if (mode_lib->vba.ModeSupport[i][0] == true)
MaximumMPCCombine = 0;
- } else {
+ else
MaximumMPCCombine = 1;
- }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index f5400eda07a5..4125d3d111d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -114,6 +114,7 @@ void dml_init_instance(struct display_mode_lib *lib,
break;
case DML_PROJECT_DCN31:
case DML_PROJECT_DCN31_FPGA:
+ case DML_PROJECT_DCN315:
lib->funcs = dml31_funcs;
break;
case DML_PROJECT_DCN314:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index b1878a1440e2..3d643d50c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -40,6 +40,7 @@ enum dml_project {
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
+ DML_PROJECT_DCN315,
DML_PROJECT_DCN31_FPGA,
DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8919a2092ac5..9498105c98ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,6 +39,8 @@
#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
+#define MAX_SVP_PHANTOM_STREAMS 2
+#define MAX_SVP_PHANTOM_PLANES 2
void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
uint32_t controller_id);
@@ -232,6 +234,7 @@ struct resource_funcs {
unsigned int index);
bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
+ void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
};
struct audio_support{
@@ -438,7 +441,6 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
- bool vtp_locked;
};
/* Data used for dynamic link encoder assignment.
@@ -492,6 +494,8 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
+ unsigned int legacy_svp_drr_stream_index;
+ bool legacy_svp_drr_stream_index_valid;
};
union bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..9e4ddc985240 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -628,8 +628,23 @@ unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
struct dc_clocks *clocks);
-void dcn_bw_update_from_pplib(struct dc *dc);
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz);
+
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks);
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks);
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index d9f1b0a4fbd4..591ab1389e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -95,10 +95,23 @@ struct clk_limit_table_entry {
unsigned int wck_ratio;
};
+struct clk_limit_num_entries {
+ unsigned int num_dcfclk_levels;
+ unsigned int num_fclk_levels;
+ unsigned int num_memclk_levels;
+ unsigned int num_socclk_levels;
+ unsigned int num_dtbclk_levels;
+ unsigned int num_dispclk_levels;
+ unsigned int num_dppclk_levels;
+ unsigned int num_phyclk_levels;
+ unsigned int num_phyclk_d18_levels;
+};
+
/* This table is contiguous */
struct clk_limit_table {
struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
- unsigned int num_entries;
+ struct clk_limit_num_entries num_entries_per_clk;
+ unsigned int num_entries; /* highest populated dpm level, kept for backward compatibility */
};
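With per-clock level counts in clk_limit_num_entries, the legacy num_entries field has to stay coherent with the deepest populated DPM level. A hypothetical sketch of how a clock manager could derive it (the helper name and the reduced field set are invented):

#include <stdio.h>

struct clk_limit_num_entries_sketch {
	unsigned int num_dcfclk_levels;
	unsigned int num_fclk_levels;
	unsigned int num_memclk_levels;
};

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Legacy num_entries tracks the deepest DPM level populated
 * across the individual clocks. */
static unsigned int derive_num_entries(const struct clk_limit_num_entries_sketch *n)
{
	return max(n->num_dcfclk_levels,
		   max(n->num_fclk_levels, n->num_memclk_levels));
}

int main(void)
{
	struct clk_limit_num_entries_sketch n = { 4, 2, 4 };

	printf("%u\n", derive_num_entries(&n)); /* 4 */
	return 0;
}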
struct wm_range_table_entry {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
new file mode 100644
index 000000000000..45645f9fd86c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2022 Advanced Micro Devices, Inc. All rights reserved. */
+
+#ifndef __DAL_CURSOR_CACHE_H__
+#define __DAL_CURSOR_CACHE_H__
+
+union reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_hubp {
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_position_cfg {
+ struct {
+ uint32_t x_pos: 16;
+ uint32_t y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union reg_hot_spot_cfg {
+ struct {
+ uint32_t x_hot: 16;
+ uint32_t y_hot: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+struct cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+
+struct cursor_rect {
+ uint32_t x;
+ uint32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+union reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attribute_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attributes_cfg {
+ struct cursor_attribute_cache_hubp aHubp;
+ struct cursor_attribute_cache_dpp aDpp;
+};
+
+#endif
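The new cursor_reg_cache.h header models every cursor register as a bits/raw union, so driver code can stage fields individually and then compare or program the register as a single 32-bit word. A minimal userspace sketch of that pattern (field layout invented):

#include <stdint.h>
#include <stdio.h>

union reg_sketch {
	struct {
		uint32_t enable : 1;
		uint32_t mode   : 3;
		uint32_t rest   : 28;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union reg_sketch cached = { .raw = 0 }, next = { .raw = 0 };

	next.bits.enable = 1;
	next.bits.mode = 2;

	/* Compare the whole word; skip the MMIO write when unchanged. */
	if (next.raw != cached.raw) {
		printf("write 0x%08x\n", (unsigned int)next.raw);
		cached.raw = next.raw;
	}
	return 0;
}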
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 3ef7faa92052..dcb80c4747b0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -28,6 +28,7 @@
#define __DAL_DPP_H__
#include "transform.h"
+#include "cursor_reg_cache.h"
union defer_reg_writes {
struct {
@@ -58,6 +59,9 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+
+ struct cursor_position_cache_dpp pos;
+ struct cursor_attribute_cache_dpp att;
};
struct dpp_input_csc_matrix {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 44c4578193a3..d5ea7545583e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -27,6 +27,7 @@
#define __DAL_HUBP_H__
#include "mem_input.h"
+#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -65,6 +66,10 @@ struct hubp {
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
bool power_gated;
+
+ struct cursor_position_cache_hubp pos;
+ struct cursor_attribute_cache_hubp att;
+ struct cursor_rect cur_rect;
};
struct surface_flip_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 72eef7a5ed83..25a1df45b264 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -209,7 +209,6 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
- bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c37d1141febe..5040836f404d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -230,4 +230,10 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 7d3147175ca2..153a88381f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -111,7 +111,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 9522fe0b36c9..4f7f99156897 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -37,7 +37,7 @@ void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
-void virtual_disable_link_output(struct dc_link *link,
+static void virtual_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index f34c45b19fcb..eb5b7eb292ef 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -248,6 +248,7 @@ struct dmub_srv_hw_params {
bool disable_dpia;
bool usb4_cm_version;
bool fw_in_system_memory;
+ bool dpia_hpd_int_enable_supported;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 5d1aadade8a5..7a8f61517424 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -400,8 +400,9 @@ union dmub_fw_boot_options {
uint32_t diag_env: 1; /* 1 if diagnostic environment */
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8 */
uint32_t usb4_cm_version: 1; /**< 1 CM support */
+ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t reserved : 17; /**< reserved */
+ uint32_t reserved : 16; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
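As usual for a firmware ABI bitfield, the new flag is carved out of the reserved run so the union stays exactly 32 bits wide. A compile-time sketch of that invariant (field names abbreviated; the 15/1/16 split assumes the pre-patch layout):

#include <assert.h>
#include <stdint.h>

union boot_options_sketch {
	struct {
		uint32_t existing_flags : 15; /* bits defined before the patch */
		uint32_t dpia_hpd_int_enable_supported : 1; /* the new bit */
		uint32_t reserved : 16; /* shrunk from 17 to compensate */
	} bits;
	uint32_t all;
};

/* Fails to compile if a bit is added without shrinking "reserved". */
static_assert(sizeof(union boot_options_sketch) == sizeof(uint32_t),
	      "boot options must remain a single 32-bit word");

int main(void) { return 0; }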
@@ -728,6 +729,12 @@ enum dmub_cmd_type {
/**
* Command type used for all VBIOS interface commands.
*/
+
+ /**
+ * Command type used to set DPIA HPD interrupt state
+ */
+ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+
DMUB_CMD__VBIOS = 128,
};
@@ -760,11 +767,6 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
-enum dmub_cmd_header_sub_type {
- DMUB_CMD__SUB_TYPE_GENERAL = 0,
- DMUB_CMD__SUB_TYPE_CURSOR_POSITION = 1
-};
-
#pragma pack(push, 1)
/**
@@ -1261,6 +1263,14 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
};
/**
+ * DMUB command structure for DPIA HPD int enable control.
+ */
+struct dmub_rb_cmd_dpia_hpd_int_enable {
+ struct dmub_cmd_header header; /* header */
+ uint32_t enable; /* dpia hpd interrupt enable */
+};
+
+/**
* struct dmub_rb_cmd_dpphy_init - DPPHY init.
*/
struct dmub_rb_cmd_dpphy_init {
@@ -2089,7 +2099,99 @@ struct dmub_rb_cmd_update_dirty_rect {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
-struct dmub_cmd_update_cursor_info_data {
+union dmub_reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_hubp {
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_position_cfg {
+ struct {
+ uint32_t cur_x_pos: 16;
+ uint32_t cur_y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union dmub_reg_hot_spot_cfg {
+ struct {
+ uint32_t hot_x: 16;
+ uint32_t hot_y: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union dmub_reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+union dmub_reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_position_cfg {
+ struct dmub_cursor_position_cache_hubp pHubp;
+ struct dmub_cursor_position_cache_dpp pDpp;
+ uint8_t pipe_idx;
+ /*
+ * Padding is required to keep the struct 4-byte aligned.
+ */
+ uint8_t padding[3];
+};
+
+struct dmub_cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union dmub_reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+struct dmub_cursor_attribute_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_attributes_cfg {
+ struct dmub_cursor_attribute_cache_hubp aHubp;
+ struct dmub_cursor_attribute_cache_dpp aDpp;
+};
+
+struct dmub_cmd_update_cursor_payload0 {
/**
* Cursor dirty rects.
*/
@@ -2116,6 +2218,20 @@ struct dmub_cmd_update_cursor_info_data {
* Currently only panel instances 0 and 1 are supported.
*/
uint8_t panel_inst;
+ /**
+ * Cursor Position Register.
+ * Registers contains Hubp & Dpp modules
+ */
+ struct dmub_cursor_position_cfg position_cfg;
+};
+
+struct dmub_cmd_update_cursor_payload1 {
+ struct dmub_cursor_attributes_cfg attribute_cfg;
+};
+
+union dmub_cmd_update_cursor_info_data {
+ struct dmub_cmd_update_cursor_payload0 payload0;
+ struct dmub_cmd_update_cursor_payload1 payload1;
};
/**
* Definition of a DMUB_CMD__UPDATE_CURSOR_INFO command.
@@ -2128,7 +2244,7 @@ struct dmub_rb_cmd_update_cursor_info {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
- struct dmub_cmd_update_cursor_info_data update_cursor_info_data;
+ union dmub_cmd_update_cursor_info_data update_cursor_info_data;
};
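Turning update_cursor_info_data into a union of payload0 (dirty rects and position registers) and payload1 (attribute registers) lets the same fixed-size ring-buffer command carry either message. A rough userspace analogue (types simplified and invented):

#include <stdio.h>

struct payload0 { int x, y; };          /* cursor position   */
struct payload1 { int width, height; }; /* cursor attributes */

struct cmd {
	int type; /* selects which union member is valid */
	union {
		struct payload0 pos;
		struct payload1 attr;
	} data;
};

int main(void)
{
	struct cmd c = { .type = 0, .data.pos = { 10, 20 } };

	/* The command stays one fixed size either way. */
	printf("%zu bytes, pos=(%d,%d)\n", sizeof(c),
	       c.data.pos.x, c.data.pos.y);
	return 0;
}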
/**
@@ -2825,11 +2941,7 @@ struct dmub_rb_cmd_get_visual_confirm_color {
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
uint32_t tg_inst;
- uint32_t enable_manual_trigger;
- uint32_t clear_force_vsync;
};
struct dmub_rb_cmd_drr_update {
@@ -3235,6 +3347,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_HPD_STATE command.
*/
struct dmub_rb_cmd_query_hpd_state query_hpd;
+ /**
+ * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
+ */
+ struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c7bd7e216710..c90b9ee42e12 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -350,6 +350,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
+ boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 04f7656906ca..447a0ec9cbe2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1692,7 +1692,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
- build_coefficients(&coeff, true);
+ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
index b798cf5a2c39..38adde3cae5a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
@@ -29,5 +29,7 @@
#define regMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 2
#define regMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
#define regMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 2
+#define regUMCCH0_0_GeccCtrl 0x0053
+#define regUMCCH0_0_GeccCtrl_BASE_IDX 2
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
index bd99b431247f..4dbec524f943 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
@@ -90,5 +90,8 @@
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+//UMCCH0_0_GeccCtrl
+#define UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT 0xd
+#define UMCCH0_0_GeccCtrl__UCFatalEn_MASK 0x00002000L
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8fd0782a2b20..f5e08b60f66e 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1384,13 +1384,16 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
+ int err;
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- amdgpu_kv_smc_bapm_enable(adev, false);
+ err = amdgpu_kv_smc_bapm_enable(adev, false);
+ if (err)
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e4fcbf8a7eb5..7ef7e81525a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3603,7 +3603,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count <=
+ (smu7_power_state->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
return -EINVAL);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99bfe5efe171..c8c9fb827bda 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3155,7 +3155,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
return -1);
PP_ASSERT_WITH_CODE(
- (vega10_ps->performance_level_count <=
+ (vega10_ps->performance_level_count <
hwmgr->platform_descriptor.
hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index bf33c3084cb4..a971590b8132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -4,7 +4,6 @@
// Author: Inki Dae <inki.dae@samsung.com>
// Author: Andrzej Hajda <a.hajda@samsung.com>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index f84d39762a72..ca127ff797f7 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -142,8 +142,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
if (ret)
drm_warn(dev, "Failed to update vram location.\n");
- hv->dirt_needed = true;
-
ret = hyperv_mode_config_init(hv);
if (ret)
goto err_free_mmio;
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index 76a182a9a765..013a7829182d 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -208,7 +208,7 @@ static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg
VM_PKT_DATA_INBAND, 0);
if (ret)
- drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+ drm_err_ratelimited(&hv->dev, "Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5fbd2ae95869..2b73f5ff0d02 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -120,7 +120,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
- dotclock = pipe_config->port_clock * 2 / 3;
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
else
dotclock = pipe_config->port_clock;
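For 12bpc HDMI the dotclock is port_clock * 2 / 3, and plain integer division always truncates; DIV_ROUND_CLOSEST() keeps the result nearest the exact value. A standalone sketch (the clock value is made up):

#include <stdio.h>

/* Round-to-nearest division for non-negative operands, matching the
 * kernel's DIV_ROUND_CLOSEST() in the common case. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	int port_clock = 100000; /* hypothetical link clock in kHz */

	printf("truncated=%d rounded=%d\n",
	       port_clock * 2 / 3,
	       DIV_ROUND_CLOSEST(port_clock * 2, 3));
	/* prints: truncated=66666 rounded=66667 */
	return 0;
}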
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dd008ba8afe3..461c62c88413 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -8130,6 +8130,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
+static int max_dotclock(struct drm_i915_private *i915)
+{
+ int max_dotclock = i915->max_dotclk_freq;
+
+ /* icl+ might use bigjoiner */
+ if (DISPLAY_VER(i915) >= 11)
+ max_dotclock *= 2;
+
+ return max_dotclock;
+}
+
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -8167,6 +8178,13 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ /*
+ * Reject clearly excessive dotclocks early to
+ * avoid having to worry about huge integers later.
+ */
+ if (mode->clock > max_dotclock(dev_priv))
+ return MODE_CLOCK_HIGH;
+
/* Transcoder timing limits */
if (DISPLAY_VER(dev_priv) >= 11) {
hdisplay_max = 16384;
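Rejecting clearly excessive dotclocks up front means the rest of the mode validation never has to cope with huge intermediate values. A sketch of the same check in isolation (all numbers invented):

#include <stdio.h>

static int max_dotclock_khz(int display_ver, int max_dotclk)
{
	/* icl+ (ver >= 11) may drive one mode with two pipes
	 * (bigjoiner), doubling the achievable dotclock. */
	return display_ver >= 11 ? max_dotclk * 2 : max_dotclk;
}

int main(void)
{
	int mode_clock = 1400000; /* hypothetical 8K mode, kHz */

	if (mode_clock > max_dotclock_khz(11, 650000))
		printf("MODE_CLOCK_HIGH\n"); /* 1400000 > 1300000 */
	return 0;
}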
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c86e5d4ee016..1dddd6abd77b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
u32 alignment;
int ret;
+ /*
+ * We are not syncing against the binding (and potential migrations)
+ * below, so this vm must never be async.
+ */
+ GEM_WARN_ON(vm->bind_async_flags);
+
if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
@@ -37,29 +44,48 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (!ret) {
+ for_i915_gem_ww(&ww, ret, true) {
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ continue;
+
+ if (HAS_LMEM(dev_priv)) {
+ unsigned int flags = obj->flags;
+
+ /*
+ * For this type of buffer we need to be able to read from the CPU
+ * the clear color value found in the buffer, hence we need to
+ * ensure it is always in the mappable part of lmem, if this is
+ * a small-bar device.
+ */
+ if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+ flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+ flags);
+ if (ret)
+ continue;
+ }
+
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- i915_gem_object_unlock(obj);
- }
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
+ if (ret)
+ continue;
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ continue;
+ }
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind_unlocked(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ continue;
}
- }
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+ if (ret)
+ continue;
+ }
if (ret) {
vma = ERR_PTR(ret);
goto err;
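for_i915_gem_ww() wraps the whole lock/migrate/pin sequence in a wound-wait transaction: every failure path simply does "continue", and the macro either retries the entire sequence after a -EDEADLK backoff or exits with ret holding the final status. A minimal analogue of that control flow (the helper is invented):

#include <errno.h>
#include <stdio.h>

static int try_lock_sequence(int attempt)
{
	/* Pretend the first attempt hits lock contention. */
	return attempt == 0 ? -EDEADLK : 0;
}

int main(void)
{
	int ret, attempt;

	for (attempt = 0; ; attempt++) {
		ret = try_lock_sequence(attempt);
		if (ret == -EDEADLK)
			continue;	/* back off and retry the sequence */
		break;			/* success or fatal error */
	}
	printf("ret=%d after %d attempts\n", ret, attempt + 1);
	return 0;
}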
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9def8d9fade6..d4cce627d7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -116,34 +116,56 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
}
}
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder trans_shift;
i915_reg_t imr_reg;
u32 mask, val;
- /*
- * gen12+ has registers relative to transcoder and one per transcoder
- * using the same bit definition: handle it as TRANSCODER_EDP to force
- * 0 shift in bit definition
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- mask = EDP_PSR_ERROR(trans_shift);
+ mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
- mask |= EDP_PSR_POST_EXIT(trans_shift) |
- EDP_PSR_PRE_ENTRY(trans_shift);
+ mask |= psr_irq_post_exit_bit_get(intel_dp) |
+ psr_irq_pre_entry_bit_get(intel_dp);
- /* Warning: it is masking/setting reserved bits too */
val = intel_de_read(dev_priv, imr_reg);
- val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val &= ~psr_irq_mask_get(intel_dp);
val |= ~mask;
intel_de_write(dev_priv, imr_reg, val);
}
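The four psr_irq_*_bit_get() helpers fold the old trans_shift juggling into one place: gen12+ uses per-transcoder registers with a fixed bit position, while older generations shift the bit by the transcoder index. A standalone sketch of the idea (bit layout invented):

#include <stdint.h>
#include <stdio.h>

#define PSR_ERROR(shift)  (1u << (2 + (shift) * 8)) /* invented layout */
#define TGL_PSR_ERROR     PSR_ERROR(0)

static uint32_t psr_error_bit_sketch(int display_ver, int transcoder)
{
	return display_ver >= 12 ? TGL_PSR_ERROR : PSR_ERROR(transcoder);
}

int main(void)
{
	printf("%#x %#x\n",
	       psr_error_bit_sketch(12, 1),  /* gen12+: fixed bit   */
	       psr_error_bit_sketch(11, 1)); /* older: shifted bit  */
	return 0;
}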
@@ -191,25 +213,21 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ktime_t time_ns = ktime_get();
- enum transcoder trans_shift;
i915_reg_t imr_reg;
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
@@ -226,7 +244,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
}
- if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
u32 val;
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
@@ -243,7 +261,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* or unset irq_aux_error.
*/
val = intel_de_read(dev_priv, imr_reg);
- val |= EDP_PSR_ERROR(trans_shift);
+ val |= psr_irq_psr_error_bit_get(intel_dp);
intel_de_write(dev_priv, imr_reg, val);
schedule_work(&intel_dp->psr.work);
@@ -1194,14 +1212,12 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate, so let's keep PSR disabled
* to avoid any rendering problems.
*/
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12)
val = intel_de_read(dev_priv,
TRANS_PSR_IIR(intel_dp->psr.transcoder));
- val &= EDP_PSR_ERROR(0);
- } else {
+ else
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
- }
+ val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 01b0932757ed..18178b01375e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1710,10 +1710,22 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
modifier == I915_FORMAT_MOD_4_TILED ||
modifier == I915_FORMAT_MOD_Yf_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0bcde53c50c6..1e29b1e6d186 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1387,14 +1387,8 @@ kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
*/
for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
- bool skip = false;
- if (exit)
- skip = intel_context_set_exiting(ce);
- else if (!persistent)
- skip = intel_context_exit_nonpersistent(ce, NULL);
-
- if (skip)
+ if ((exit || !persistent) && intel_context_revoke(ce))
continue; /* Already marked. */
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7ff9c7877bec..369006c5317f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -653,6 +653,41 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id)
{
+ return __i915_gem_object_migrate(obj, ww, id, obj->flags);
+}
+
+/**
+ * __i915_gem_object_migrate - Migrate an object to the desired region id, with
+ * control of the extra flags
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ * @flags: The object flags. Normally just obj->flags.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags)
+{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_memory_region *mr;
@@ -672,7 +707,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
return 0;
}
- return obj->ops->migrate(obj, mr);
+ return obj->ops->migrate(obj, mr, flags);
}
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7317d4102955..1723af9b0f6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -608,6 +608,10 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id);
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags);
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
enum intel_region_id id);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 40305e2bcd49..d0d6772e6f36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -107,7 +107,8 @@ struct drm_i915_gem_object_ops {
* pinning or for as long as the object lock is held.
*/
int (*migrate)(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr);
+ struct intel_memory_region *mr,
+ unsigned int flags);
void (*release)(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e3fc38dd5db0..4f861782c3e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -848,9 +848,10 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
}
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr)
+ struct intel_memory_region *mr,
+ unsigned int flags)
{
- return __i915_ttm_migrate(obj, mr, obj->flags);
+ return __i915_ttm_migrate(obj, mr, flags);
}
static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8423df021b71..d4398948f016 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -426,12 +426,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
- const unsigned long end = addr + len;
+ VMA_ITERATOR(vmi, mm, addr);
struct vm_area_struct *vma;
- int ret = -EFAULT;
mmap_read_lock(mm);
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ for_each_vma_range(vmi, vma, addr + len) {
/* Check for holes, note that we also update the addr below */
if (vma->vm_start > addr)
break;
@@ -439,16 +438,13 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
break;
- if (vma->vm_end >= end) {
- ret = 0;
- break;
- }
-
addr = vma->vm_end;
}
mmap_read_unlock(mm);
- return ret;
+ if (vma)
+ return -EFAULT;
+ return 0;
}
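The VMA_ITERATOR rework inverts the old logic: instead of searching for a vma that covers the end of the range, the walk breaks out early on a hole or special mapping (leaving vma non-NULL), and success is simply running the iterator to exhaustion. A userspace analogue over a sorted interval table (a stand-in for the maple tree):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct ivl { unsigned long start, end; };

static int probe_range_sketch(const struct ivl *tbl, size_t n,
			      unsigned long addr, unsigned long len)
{
	const struct ivl *v = NULL;
	size_t i;

	for (i = 0; i < n && tbl[i].start < addr + len; i++) {
		v = &tbl[i];
		if (v->start > addr)	/* hole before this interval */
			break;
		addr = v->end;
		v = NULL;		/* consumed; iterator may exhaust */
	}
	return v ? -EFAULT : 0;
}

int main(void)
{
	const struct ivl map[] = { { 0, 4096 }, { 8192, 12288 } };

	printf("%d %d\n",
	       probe_range_sketch(map, 2, 0, 4096),   /* covered: 0  */
	       probe_range_sketch(map, 2, 0, 12288)); /* hole: -14   */
	return 0;
}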
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 654a092ed3d6..e94365b08f1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -614,13 +614,12 @@ bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
return ret;
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq)
+bool intel_context_revoke(struct intel_context *ce)
{
bool ret = intel_context_set_exiting(ce);
if (ce->ops->revoke)
- ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+ ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 8e2d70630c49..be09fb2e883a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -329,8 +329,7 @@ static inline bool intel_context_set_exiting(struct intel_context *ce)
return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq);
+bool intel_context_revoke(struct intel_context *ce);
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 30cf5c3369d9..2049a00417af 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1275,10 +1275,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- if (!retained_ptes)
+ if (!retained_ptes) {
+ /*
+ * Clear the bound flags of the vma resource to allow
+ * ptes to be repopulated.
+ */
+ vma->resource->bound_flags = 0;
vma->ops->bind_vma(vm, NULL, vma->resource,
obj ? obj->cache_level : 0,
was_bound);
+ }
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c6ebe2781076..152244d7f62a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -207,6 +207,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
+ /* Bypass LLC - Uncached (EHL+) */ \
+ MOCS_ENTRY(16, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_1_UC), \
+ /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+ MOCS_ENTRY(17, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 22ba66e48a9b..1db59eeb34db 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -684,7 +684,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
* Corner case where requests were sitting in the priority list or a
* request resubmitted after the context was banned.
*/
- if (unlikely(intel_context_is_banned(ce))) {
+ if (unlikely(!intel_context_is_schedulable(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine);
return 0;
@@ -870,15 +870,15 @@ static int guc_wq_item_append(struct intel_guc *guc,
struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
- int ret = 0;
+ int ret;
- if (likely(!intel_context_is_banned(ce))) {
- ret = __guc_wq_item_append(rq);
+ if (unlikely(!intel_context_is_schedulable(ce)))
+ return 0;
- if (unlikely(ret == -EBUSY)) {
- guc->stalled_request = rq;
- guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
- }
+ ret = __guc_wq_item_append(rq);
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
}
return ret;
@@ -897,7 +897,7 @@ static bool multi_lrc_submit(struct i915_request *rq)
* submitting all the requests generated in parallel.
*/
return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
- intel_context_is_banned(ce);
+ !intel_context_is_schedulable(ce);
}
static int guc_dequeue_one_context(struct intel_guc *guc)
@@ -966,7 +966,7 @@ register_context:
struct intel_context *ce = request_to_scheduling_context(last);
if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
- !intel_context_is_banned(ce))) {
+ intel_context_is_schedulable(ce))) {
ret = try_context_registration(ce, false);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
@@ -1576,7 +1576,7 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
{
struct intel_engine_cs *engine = __context_to_physical_engine(ce);
- if (intel_context_is_banned(ce))
+ if (!intel_context_is_schedulable(ce))
return;
GEM_BUG_ON(!intel_context_is_pinned(ce));
@@ -4424,12 +4424,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- if (likely(!intel_context_is_banned(ce))) {
+ if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
} else {
drm_info(&guc_to_gt(guc)->i915->drm,
- "Ignoring context reset notification of banned context 0x%04X on %s",
+ "Ignoring context reset notification of exiting context 0x%04X on %s",
ce->guc_id.id, ce->engine->name);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b81a6d35a7b..076c779f776a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -240,13 +240,13 @@ static void free_resource(struct intel_vgpu *vgpu)
}
static int alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned long request, avail, max, taken;
const char *item;
- if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ if (!conf->low_mm || !conf->high_mm || !conf->fence) {
gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}
@@ -255,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
taken = gvt->gm.vgpu_allocated_low_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->low_gm_sz);
+ request = conf->low_mm;
if (request > avail)
goto no_enough_resource;
@@ -266,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
taken = gvt->gm.vgpu_allocated_high_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->high_gm_sz);
+ request = conf->high_mm;
if (request > avail)
goto no_enough_resource;
@@ -277,16 +277,16 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_fence_sz(gvt) - HOST_FENCE;
taken = gvt->fence.vgpu_allocated_fence_num;
avail = max - taken;
- request = param->fence_sz;
+ request = conf->fence;
if (request > avail)
goto no_enough_resource;
vgpu_fence_sz(vgpu) = request;
- gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
- gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
- gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm;
+ gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm;
+ gvt->fence.vgpu_allocated_fence_num += conf->fence;
return 0;
no_enough_resource:
@@ -340,11 +340,11 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
*
*/
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
int ret;
- ret = alloc_resource(vgpu, param);
+ ret = alloc_resource(vgpu, conf);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 705689e64011..dbf8d7470b2c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -36,6 +36,7 @@
#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include "i915_drv.h"
#include "intel_gvt.h"
@@ -172,6 +173,7 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct intel_vgpu {
+ struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
@@ -211,7 +213,6 @@ struct intel_vgpu {
u32 scan_nonprivbb;
- struct vfio_device vfio_device;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
@@ -294,15 +295,25 @@ struct intel_gvt_firmware {
bool firmware_loaded;
};
-#define NR_MAX_INTEL_VGPU_TYPES 20
-struct intel_vgpu_type {
- char name[16];
- unsigned int avail_instance;
- unsigned int low_gm_size;
- unsigned int high_gm_size;
+struct intel_vgpu_config {
+ unsigned int low_mm;
+ unsigned int high_mm;
unsigned int fence;
+
+ /*
+ * A vGPU with a weight of 8 will get twice as much GPU time as a vGPU
+ * with a weight of 4 on a contended host; different vGPU types have
+ * different weights. Legal weights range from 1 to 16.
+ */
unsigned int weight;
- enum intel_vgpu_edid resolution;
+ enum intel_vgpu_edid edid;
+ const char *name;
+};
+
+struct intel_vgpu_type {
+ struct mdev_type type;
+ char name[16];
+ const struct intel_vgpu_config *conf;
};
struct intel_gvt {
@@ -326,6 +337,8 @@ struct intel_gvt {
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct mdev_parent parent;
+ struct mdev_type **mdev_types;
struct intel_vgpu_type *types;
unsigned int num_types;
struct intel_vgpu *idle_vgpu;
@@ -436,19 +449,8 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* ring context size i.e. the first 0x50 dwords*/
#define RING_CTX_SIZE 320
-struct intel_vgpu_creation_params {
- __u64 low_gm_sz; /* in MB */
- __u64 high_gm_sz; /* in MB */
- __u64 fence_sz;
- __u64 resolution;
- __s32 primary;
- __u64 vgpu_id;
-
- __u32 weight;
-};
-
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param);
+ const struct intel_vgpu_config *conf);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
@@ -494,8 +496,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type);
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e3cd58946477..7a45e5360caf 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -34,7 +34,6 @@
*/
#include <linux/init.h>
-#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
@@ -43,7 +42,6 @@
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>
@@ -115,117 +113,18 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node);
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
+static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
-
- return sprintf(buf, "%s\n", type->name);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-static MDEV_TYPE_ATTR_RO(name);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- &mdev_type_attr_name.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (!group)
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
+ BYTES_TO_MB(type->conf->low_mm),
+ BYTES_TO_MB(type->conf->high_mm),
+ type->conf->fence, vgpu_edid_str(type->conf->edid),
+ type->conf->weight);
}
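intel_vgpu_show_description() now recovers its type from the embedded mdev_type via container_of() instead of indexing a global table by group id. A minimal userspace sketch of that recovery (types simplified):

#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the wrapping structure from a pointer to
 * one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mdev_type_sketch { int dummy; };
struct vgpu_type_sketch {
	struct mdev_type_sketch type;
	const char *name;
};

int main(void)
{
	struct vgpu_type_sketch t = { .name = "GVTg_V5_4" };
	struct mdev_type_sketch *m = &t.type;

	printf("%s\n", container_of(m, struct vgpu_type_sketch, type)->name);
	return 0;
}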
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
@@ -1546,7 +1445,28 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
+static int intel_vgpu_init_dev(struct vfio_device *vfio_dev)
+{
+ struct mdev_device *mdev = to_mdev_device(vfio_dev->dev);
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct intel_vgpu_type *type =
+ container_of(mdev->type, struct intel_vgpu_type, type);
+
+ vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
+ return intel_gvt_create_vgpu(vgpu, type->conf);
+}
+
+static void intel_vgpu_release_dev(struct vfio_device *vfio_dev)
+{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+
+ intel_gvt_destroy_vgpu(vgpu);
+ vfio_free_device(vfio_dev);
+}
+
static const struct vfio_device_ops intel_vgpu_dev_ops = {
+ .init = intel_vgpu_init_dev,
+ .release = intel_vgpu_release_dev,
.open_device = intel_vgpu_open_device,
.close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
@@ -1558,35 +1478,28 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
static int intel_vgpu_probe(struct mdev_device *mdev)
{
- struct device *pdev = mdev_parent_dev(mdev);
- struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
- struct intel_vgpu_type *type;
struct intel_vgpu *vgpu;
int ret;
- type = &gvt->types[mdev_get_type_group_id(mdev)];
- if (!type)
- return -EINVAL;
-
- vgpu = intel_gvt_create_vgpu(gvt, type);
+ vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
+ &intel_vgpu_dev_ops);
if (IS_ERR(vgpu)) {
gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
return PTR_ERR(vgpu);
}
- vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
- &intel_vgpu_dev_ops);
-
dev_set_drvdata(&mdev->dev, vgpu);
ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
- if (ret) {
- intel_gvt_destroy_vgpu(vgpu);
- return ret;
- }
+ if (ret)
+ goto out_put_vdev;
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vgpu->vfio_device);
+ return ret;
}
static void intel_vgpu_remove(struct mdev_device *mdev)
@@ -1595,18 +1508,43 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
if (WARN_ON_ONCE(vgpu->attached))
return;
- intel_gvt_destroy_vgpu(vgpu);
+
+ vfio_unregister_group_dev(&vgpu->vfio_device);
+ vfio_put_device(&vgpu->vfio_device);
+}
+
+static unsigned int intel_vgpu_get_available(struct mdev_type *mtype)
+{
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
+ struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+
+ mutex_lock(&gvt->lock);
+ low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+ mutex_unlock(&gvt->lock);
+
+ return min3(low_gm_avail / type->conf->low_mm,
+ high_gm_avail / type->conf->high_mm,
+ fence_avail / type->conf->fence);
}
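A worked example of the min3() calculation above (the numbers are invented for illustration and do not correspond to any real SKU):

	/*
	 * low_gm_avail  = 448 MB,  conf->low_mm  = 64 MB  -> 448 / 64   = 7
	 * high_gm_avail = 6144 MB, conf->high_mm = 384 MB -> 6144 / 384 = 16
	 * fence_avail   = 28,      conf->fence   = 4      -> 28 / 4     = 7
	 *
	 * min3(7, 16, 7) = 7, so at most seven more instances of this
	 * type can still be created.
	 */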
static struct mdev_driver intel_vgpu_mdev_driver = {
+ .device_api = VFIO_DEVICE_API_PCI_STRING,
.driver = {
.name = "intel_vgpu_mdev",
.owner = THIS_MODULE,
.dev_groups = intel_vgpu_groups,
},
- .probe = intel_vgpu_probe,
- .remove = intel_vgpu_remove,
- .supported_type_groups = gvt_vgpu_type_groups,
+ .probe = intel_vgpu_probe,
+ .remove = intel_vgpu_remove,
+ .get_available = intel_vgpu_get_available,
+ .show_description = intel_vgpu_show_description,
};
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
@@ -1904,8 +1842,7 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915)
if (drm_WARN_ON(&i915->drm, !gvt))
return;
- mdev_unregister_device(i915->drm.dev);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_parent(&gvt->parent);
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
@@ -2005,19 +1942,15 @@ static int intel_gvt_init_device(struct drm_i915_private *i915)
intel_gvt_debugfs_init(gvt);
- ret = intel_gvt_init_vgpu_type_groups(gvt);
+ ret = mdev_register_parent(&gvt->parent, i915->drm.dev,
+ &intel_vgpu_mdev_driver,
+ gvt->mdev_types, gvt->num_types);
if (ret)
goto out_destroy_idle_vgpu;
- ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
- if (ret)
- goto out_cleanup_vgpu_type_groups;
-
gvt_dbg_core("gvt device initialization is done\n");
return 0;
-out_cleanup_vgpu_type_groups:
- intel_gvt_cleanup_vgpu_type_groups(gvt);
out_destroy_idle_vgpu:
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_debugfs_clean(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 46da19b3225d..56c71474008a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -73,24 +73,21 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+/*
+ * vGPU type name is defined as GVTg_Vx_y, where x is the physical GPU
+ * generation (e.g. V4 for BDW server, V5 for SKL server).
+ *
+ * Depending on the physical SKU resources, we might see vGPU types like
+ * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types of vGPU can be
+ * created on the same physical GPU depending on the available resources.
+ * Each vGPU type has its own avail_instance count indicating how many
+ * vGPU instances can still be created for that type.
+ */
#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num) \
(VGPU_MAX_WEIGHT / (vgpu_num))
-static const struct {
- unsigned int low_mm;
- unsigned int high_mm;
- unsigned int fence;
-
- /* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
- * with a weight of 4 on a contended host, different vGPU type has
- * different weight set. Legal weights range from 1 to 16.
- */
- unsigned int weight;
- enum intel_vgpu_edid edid;
- const char *name;
-} vgpu_types[] = {
-/* Fixed vGPU type table */
+static const struct intel_vgpu_config intel_vgpu_configs[] = {
{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
@@ -106,102 +103,58 @@ static const struct {
*/
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
- unsigned int num_types;
- unsigned int i, low_avail, high_avail;
- unsigned int min_low;
-
- /* vGPU type name is defined as GVTg_Vx_y which contains
- * physical GPU generation type (e.g V4 as BDW server, V5 as
- * SKL server).
- *
- * Depend on physical SKU resource, might see vGPU types like
- * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
- * different types of vGPU on same physical GPU depending on
- * available resource. Each vGPU type will have "avail_instance"
- * to indicate how many vGPU instance can be created for this
- * type.
- *
- */
- low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
- high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = ARRAY_SIZE(vgpu_types);
+ unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs);
+ unsigned int i;
gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
if (!gvt->types)
return -ENOMEM;
- min_low = MB_TO_BYTES(32);
- for (i = 0; i < num_types; ++i) {
- if (low_avail / vgpu_types[i].low_mm == 0)
- break;
-
- gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
- gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
- gvt->types[i].fence = vgpu_types[i].fence;
+ gvt->mdev_types = kcalloc(num_types, sizeof(*gvt->mdev_types),
+ GFP_KERNEL);
+ if (!gvt->mdev_types)
+ goto out_free_types;
- if (vgpu_types[i].weight < 1 ||
- vgpu_types[i].weight > VGPU_MAX_WEIGHT)
- return -EINVAL;
+ for (i = 0; i < num_types; ++i) {
+ const struct intel_vgpu_config *conf = &intel_vgpu_configs[i];
- gvt->types[i].weight = vgpu_types[i].weight;
- gvt->types[i].resolution = vgpu_types[i].edid;
- gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
- high_avail / vgpu_types[i].high_mm);
+ if (low_avail / conf->low_mm == 0)
+ break;
+ if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT)
+ goto out_free_mdev_types;
- if (GRAPHICS_VER(gvt->gt->i915) == 8)
- sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (GRAPHICS_VER(gvt->gt->i915) == 9)
- sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ sprintf(gvt->types[i].name, "GVTg_V%u_%s",
+ GRAPHICS_VER(gvt->gt->i915) == 8 ? 4 : 5, conf->name);
+ gvt->types[i].conf = conf;
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
- gvt->types[i].avail_instance,
- gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence,
- gvt->types[i].weight,
- vgpu_edid_str(gvt->types[i].resolution));
+ min(low_avail / conf->low_mm,
+ high_avail / conf->high_mm),
+ conf->low_mm, conf->high_mm, conf->fence,
+ conf->weight, vgpu_edid_str(conf->edid));
+
+ gvt->mdev_types[i] = &gvt->types[i].type;
+ gvt->mdev_types[i]->sysfs_name = gvt->types[i].name;
}
gvt->num_types = i;
return 0;
-}
-void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
-{
+out_free_mdev_types:
+ kfree(gvt->mdev_types);
+out_free_types:
kfree(gvt->types);
+ return -EINVAL;
}
-static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
- int i;
- unsigned int low_gm_avail, high_gm_avail, fence_avail;
- unsigned int low_gm_min, high_gm_min, fence_min;
-
- /* Need to depend on maxium hw resource size but keep on
- * static config for now.
- */
- low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
- gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
- gvt->gm.vgpu_allocated_high_gm_size;
- fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
- gvt->fence.vgpu_allocated_fence_num;
-
- for (i = 0; i < gvt->num_types; i++) {
- low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
- high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
- fence_min = fence_avail / gvt->types[i].fence;
- gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
- fence_min);
-
- gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name,
- gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence);
- }
+ kfree(gvt->mdev_types);
+ kfree(gvt->types);
}
/**
@@ -298,12 +251,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
-
- mutex_lock(&gvt->lock);
- intel_gvt_update_vgpu_types(gvt);
- mutex_unlock(&gvt->lock);
-
- vfree(vgpu);
}
#define IDLE_VGPU_IDR 0
@@ -363,42 +310,38 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
vfree(vgpu);
}
-static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_creation_params *param)
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf)
{
+ struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->gt->i915;
- struct intel_vgpu *vgpu;
int ret;
- gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
- param->low_gm_sz, param->high_gm_sz,
- param->fence_sz);
-
- vgpu = vzalloc(sizeof(*vgpu));
- if (!vgpu)
- return ERR_PTR(-ENOMEM);
+ gvt_dbg_core("low %u MB high %u MB fence %u\n",
+ BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm),
+ conf->fence);
+ mutex_lock(&gvt->lock);
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
- goto out_free_vgpu;
+ goto out_unlock;
vgpu->id = ret;
- vgpu->gvt = gvt;
- vgpu->sched_ctl.weight = param->weight;
+ vgpu->sched_ctl.weight = conf->weight;
mutex_init(&vgpu->vgpu_lock);
mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init_base(&vgpu->object_idr, 1);
- intel_vgpu_init_cfg_space(vgpu, param->primary);
+ intel_vgpu_init_cfg_space(vgpu, 1);
vgpu->d3_entered = false;
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
goto out_clean_idr;
- ret = intel_vgpu_alloc_resource(vgpu, param);
+ ret = intel_vgpu_alloc_resource(vgpu, conf);
if (ret)
goto out_clean_vgpu_mmio;
@@ -412,7 +355,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_gtt;
- ret = intel_vgpu_init_display(vgpu, param->resolution);
+ ret = intel_vgpu_init_display(vgpu, conf->edid);
if (ret)
goto out_clean_opregion;
@@ -437,7 +380,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- return vgpu;
+ intel_gvt_update_reg_whitelist(vgpu);
+ mutex_unlock(&gvt->lock);
+ return 0;
out_clean_sched_policy:
intel_vgpu_clean_sched_policy(vgpu);
@@ -455,48 +400,9 @@ out_clean_vgpu_mmio:
intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
-out_free_vgpu:
- vfree(vgpu);
- return ERR_PTR(ret);
-}
-
-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @type: type of the vGPU to create
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type)
-{
- struct intel_vgpu_creation_params param;
- struct intel_vgpu *vgpu;
-
- param.primary = 1;
- param.low_gm_sz = type->low_gm_size;
- param.high_gm_sz = type->high_gm_size;
- param.fence_sz = type->fence;
- param.weight = type->weight;
- param.resolution = type->resolution;
-
- /* XXX current param based on MB */
- param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
- param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
-
- mutex_lock(&gvt->lock);
- vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu)) {
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
- intel_gvt_update_reg_whitelist(vgpu);
- }
+out_unlock:
mutex_unlock(&gvt->lock);
-
- return vgpu;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1a9bd829fc7e..0b287a59dc2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2157,10 +2157,18 @@
#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
-#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+#define TGL_PSR_MASK REG_GENMASK(2, 0)
+#define TGL_PSR_ERROR REG_BIT(2)
+#define TGL_PSR_POST_EXIT REG_BIT(1)
+#define TGL_PSR_PRE_ENTRY REG_BIT(0)
+#define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \
+ _EDP_PSR_TRANS_SHIFT(trans))
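A quick worked example of the per-transcoder shifting (the values follow directly from the macros above): for TRANSCODER_EDP the shift is 0, so EDP_PSR_ERROR(TRANSCODER_EDP) == REG_BIT(2) == 0x4; for TRANSCODER_A the shift is (TRANSCODER_A - TRANSCODER_A + 1) * 8 == 8, so EDP_PSR_ERROR(TRANSCODER_A) == 0x4 << 8 == 0x400.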
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 81e7e404a5fc..2ca6ab600bc9 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1014,7 +1014,8 @@ static const char *absolutes[ABS_CNT] = {
[ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
[ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
[ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth",
- [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
+ [ABS_VOLUME] = "Volume", [ABS_PROFILE] = "Profile",
+ [ABS_MISC] = "Misc",
[ABS_MT_TOUCH_MAJOR] = "MTMajor",
[ABS_MT_TOUCH_MINOR] = "MTMinor",
[ABS_MT_WIDTH_MAJOR] = "MTMajorW",
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index eca7afd366d6..9dc27e5d367a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -431,34 +431,29 @@ struct vmbus_channel *relid2channel(u32 relid)
void vmbus_on_event(unsigned long data)
{
struct vmbus_channel *channel = (void *) data;
- unsigned long time_limit = jiffies + 2;
+ void (*callback_fn)(void *context);
trace_vmbus_on_event(channel);
hv_debug_delay_test(channel, INTERRUPT_DELAY);
- do {
- void (*callback_fn)(void *);
- /* A channel once created is persistent even when
- * there is no driver handling the device. An
- * unloading driver sets the onchannel_callback to NULL.
- */
- callback_fn = READ_ONCE(channel->onchannel_callback);
- if (unlikely(callback_fn == NULL))
- return;
-
- (*callback_fn)(channel->channel_callback_context);
+ /* A channel once created is persistent even when
+ * there is no driver handling the device. An
+ * unloading driver sets the onchannel_callback to NULL.
+ */
+ callback_fn = READ_ONCE(channel->onchannel_callback);
+ if (unlikely(!callback_fn))
+ return;
- if (channel->callback_mode != HV_CALL_BATCHED)
- return;
+ (*callback_fn)(channel->channel_callback_context);
- if (likely(hv_end_read(&channel->inbound) == 0))
- return;
+ if (channel->callback_mode != HV_CALL_BATCHED)
+ return;
- hv_begin_read(&channel->inbound);
- } while (likely(time_before(jiffies, time_limit)));
+ if (likely(hv_end_read(&channel->inbound) == 0))
+ return;
- /* The time limit (2 jiffies) has been reached */
+ hv_begin_read(&channel->inbound);
tasklet_schedule(&channel->callback_event);
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 7b9f3fc3adf7..8b2e413bf19c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -46,8 +46,6 @@ struct vmbus_dynid {
static struct acpi_device *hv_acpi_dev;
-static struct completion probe_event;
-
static int hyperv_cpuhp_online;
static void *hv_panic_page;
@@ -1132,7 +1130,8 @@ void vmbus_on_msg_dpc(unsigned long data)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);
+ ctx->msg.header = msg_copy.header;
+ memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);
/*
* The host can generate a rescind message while we
@@ -1573,7 +1572,7 @@ err_setup:
}
/**
- * __vmbus_child_driver_register() - Register a vmbus's driver
+ * __vmbus_driver_register() - Register a vmbus's driver
* @hv_driver: Pointer to driver structure you want to register
* @owner: owner module of the drv
* @mod_name: module name string
@@ -2052,7 +2051,7 @@ struct hv_device *vmbus_device_create(const guid_t *type,
child_device_obj->channel = channel;
guid_copy(&child_device_obj->dev_type, type);
guid_copy(&child_device_obj->dev_instance, instance);
- child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
+ child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
return child_device_obj;
}
@@ -2468,7 +2467,6 @@ static int vmbus_acpi_add(struct acpi_device *device)
ret_val = 0;
acpi_walk_err:
- complete(&probe_event);
if (ret_val)
vmbus_acpi_remove(device);
return ret_val;
@@ -2647,6 +2645,7 @@ static struct acpi_driver vmbus_acpi_driver = {
.remove = vmbus_acpi_remove,
},
.drv.pm = &vmbus_bus_pm,
+ .drv.probe_type = PROBE_FORCE_SYNCHRONOUS,
};
static void hv_kexec_handler(void)
@@ -2719,7 +2718,7 @@ static struct syscore_ops hv_synic_syscore_ops = {
static int __init hv_acpi_init(void)
{
- int ret, t;
+ int ret;
if (!hv_is_hyperv_initialized())
return -ENODEV;
@@ -2727,8 +2726,6 @@ static int __init hv_acpi_init(void)
if (hv_root_partition)
return 0;
- init_completion(&probe_event);
-
/*
* Get ACPI resources first.
*/
@@ -2737,9 +2734,8 @@ static int __init hv_acpi_init(void)
if (ret)
return ret;
- t = wait_for_completion_timeout(&probe_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
+ if (!hv_acpi_dev) {
+ ret = -ENODEV;
goto cleanup;
}
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 185dedfebbac..c64c381b69b7 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -244,6 +244,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
u32 command, irq_handled = 0;
struct i2c_client *slave = bus->slave;
u8 value;
+ int ret;
if (!slave)
return 0;
@@ -311,7 +312,13 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
break;
case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
- i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ /*
+ * The slave has already ACKed this address phase, but since the
+ * backend driver returned an error, the bus driver should NACK the
+ * next incoming byte.
+ */
+ if (ret < 0)
+ writel(ASPEED_I2CD_M_S_RX_CMD_LAST, bus->base + ASPEED_I2C_CMD_REG);
break;
case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 70b80e710990..4d3a3b464ecd 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -126,8 +126,9 @@
* status codes
*/
#define STATUS_IDLE 0x0
-#define STATUS_WRITE_IN_PROGRESS 0x1
-#define STATUS_READ_IN_PROGRESS 0x2
+#define STATUS_ACTIVE 0x1
+#define STATUS_WRITE_IN_PROGRESS 0x2
+#define STATUS_READ_IN_PROGRESS 0x4
/*
* operation modes
@@ -334,12 +335,14 @@ void i2c_dw_disable_int(struct dw_i2c_dev *dev);
static inline void __i2c_dw_enable(struct dw_i2c_dev *dev)
{
+ dev->status |= STATUS_ACTIVE;
regmap_write(dev->map, DW_IC_ENABLE, 1);
}
static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev)
{
regmap_write(dev->map, DW_IC_ENABLE, 0);
+ dev->status &= ~STATUS_ACTIVE;
}
void __i2c_dw_disable(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 44a94b225ed8..dc3c5a15a95b 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -716,6 +716,19 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
u32 stat;
stat = i2c_dw_read_clear_intrbits(dev);
+
+ if (!(dev->status & STATUS_ACTIVE)) {
+ /*
+ * Unexpected interrupt from the driver's point of view. The state
+ * variables are either unset or stale, so acknowledge and disable
+ * interrupts to suppress any further ones, in case the interrupt
+ * really did come from this HW (e.g. firmware has left the HW
+ * active).
+ */
+ regmap_write(dev->map, DW_IC_INTR_MASK, 0);
+ return 0;
+ }
+
if (stat & DW_IC_INTR_TX_ABRT) {
dev->cmd_err |= DW_IC_ERR_TX_ABRT;
dev->status = STATUS_IDLE;
diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
index f5342201eb6b..09af75921147 100644
--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
@@ -708,7 +708,7 @@ static void pci1xxxx_i2c_init(struct pci1xxxx_i2c *i2c)
void __iomem *p2 = i2c->i2c_base + SMBUS_STATUS_REG_OFF;
void __iomem *p1 = i2c->i2c_base + SMB_GPR_REG;
u8 regval;
- u8 ret;
+ int ret;
ret = set_sys_lock(i2c);
if (ret == -EPERM) {
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index ea48e6a9cfca..87739fb4388b 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -807,6 +807,7 @@ static const struct cci_data cci_v2_data = {
};
static const struct of_device_id cci_dt_match[] = {
+ { .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index da6568a20177..4dd777cc0c89 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -137,6 +137,11 @@ static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = {
{}
};
+struct i2c_acpi_irq_context {
+ int irq;
+ bool wake_capable;
+};
+
static int i2c_acpi_do_lookup(struct acpi_device *adev,
struct i2c_acpi_lookup *lookup)
{
@@ -168,13 +173,19 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev,
return 0;
}
-static int i2c_acpi_add_resource(struct acpi_resource *ares, void *data)
+static int i2c_acpi_add_irq_resource(struct acpi_resource *ares, void *data)
{
- int *irq = data;
+ struct i2c_acpi_irq_context *irq_ctx = data;
struct resource r;
- if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
- *irq = i2c_dev_irq_from_resources(&r, 1);
+ if (irq_ctx->irq > 0)
+ return 1;
+
+ if (!acpi_dev_resource_interrupt(ares, 0, &r))
+ return 1;
+
+ irq_ctx->irq = i2c_dev_irq_from_resources(&r, 1);
+ irq_ctx->wake_capable = r.flags & IORESOURCE_IRQ_WAKECAPABLE;
return 1; /* No need to add resource to the list */
}
@@ -182,31 +193,40 @@ static int i2c_acpi_add_resource(struct acpi_resource *ares, void *data)
/**
* i2c_acpi_get_irq - get device IRQ number from ACPI
* @client: Pointer to the I2C client device
+ * @wake_capable: Set to true if the IRQ is wake capable
*
* Find the IRQ number used by a specific client device.
*
* Return: The IRQ number or an error code.
*/
-int i2c_acpi_get_irq(struct i2c_client *client)
+int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
struct list_head resource_list;
- int irq = -ENOENT;
+ struct i2c_acpi_irq_context irq_ctx = {
+ .irq = -ENOENT,
+ };
int ret;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- i2c_acpi_add_resource, &irq);
+ i2c_acpi_add_irq_resource, &irq_ctx);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&resource_list);
- if (irq == -ENOENT)
- irq = acpi_dev_gpio_irq_get(adev, 0);
+ if (irq_ctx.irq == -ENOENT)
+ irq_ctx.irq = acpi_dev_gpio_irq_wake_get(adev, 0, &irq_ctx.wake_capable);
+
+ if (irq_ctx.irq < 0)
+ return irq_ctx.irq;
+
+ if (wake_capable)
+ *wake_capable = irq_ctx.wake_capable;
- return irq;
+ return irq_ctx.irq;
}
static int i2c_acpi_get_info(struct acpi_device *adev,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 8c7e3494ca5f..b4edf10e8fd0 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -487,7 +487,11 @@ static int i2c_device_probe(struct device *dev)
if (irq == -EINVAL || irq == -ENODATA)
irq = of_irq_get(dev->of_node, 0);
} else if (ACPI_COMPANION(dev)) {
- irq = i2c_acpi_get_irq(client);
+ bool wake_capable;
+
+ irq = i2c_acpi_get_irq(client, &wake_capable);
+ if (irq > 0 && wake_capable)
+ client->flags |= I2C_CLIENT_WAKE;
}
if (irq == -EPROBE_DEFER) {
status = irq;
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 87e2c914f1c5..1247e6e6e975 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -61,11 +61,11 @@ static inline int __i2c_check_suspended(struct i2c_adapter *adap)
#ifdef CONFIG_ACPI
void i2c_acpi_register_devices(struct i2c_adapter *adap);
-int i2c_acpi_get_irq(struct i2c_client *client);
+int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
-static inline int i2c_acpi_get_irq(struct i2c_client *client)
+static inline int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable)
{
return 0;
}
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index fa8d1a466014..16231fe080b0 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -6,9 +6,6 @@
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
-/*
- */
-
/* #define DEBUG */
#include <linux/input.h>
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 8229a9006917..c321cdabd214 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -6,9 +6,6 @@
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
-/*
- */
-
/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 11bbd1edfdb4..76ce41e58df0 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -7,9 +7,6 @@
* EMU10k1 - SB Live / Audigy - gameport driver for Linux
*/
-/*
- */
-
#include <asm/io.h>
#include <linux/module.h>
diff --git a/drivers/input/gameport/lightning.c b/drivers/input/gameport/lightning.c
index 87eeb4b5b5b5..2ce717b25a84 100644
--- a/drivers/input/gameport/lightning.c
+++ b/drivers/input/gameport/lightning.c
@@ -7,9 +7,6 @@
* PDPI Lightning 4 gamecard driver for Linux.
*/
-/*
- */
-
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 2f80b7f1b736..91a8cd346e9b 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -8,9 +8,6 @@
* NS558 based standard IBM game port driver for Linux
*/
-/*
- */
-
#include <asm/io.h>
#include <linux/module.h>
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index b45ddb457002..5824bca02e5a 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -746,7 +746,7 @@ static void joydev_cleanup(struct joydev *joydev)
}
/*
- * These codes are copied from from hid-ids.h, unfortunately there is no common
+ * These codes are copied from hid-ids.h, unfortunately there is no common
* usb_ids/bt_ids.h header.
*/
#define USB_VENDOR_ID_SONY 0x054c
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 68475fad177c..fd1827baf27c 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -7,9 +7,6 @@
* FP-Gaming Assassin 3D joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index e0cfdc84763f..c0deff5d4282 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -26,8 +26,23 @@ struct adc_joystick {
struct adc_joystick_axis *axes;
struct iio_channel *chans;
int num_chans;
+ bool polled;
};
+static void adc_joystick_poll(struct input_dev *input)
+{
+ struct adc_joystick *joy = input_get_drvdata(input);
+ int i, val, ret;
+
+ for (i = 0; i < joy->num_chans; i++) {
+ ret = iio_read_channel_raw(&joy->chans[i], &val);
+ if (ret < 0)
+ return;
+ input_report_abs(input, joy->axes[i].code, val);
+ }
+ input_sync(input);
+}
+
static int adc_joystick_handle(const void *data, void *private)
{
struct adc_joystick *joy = private;
@@ -179,6 +194,7 @@ static int adc_joystick_probe(struct platform_device *pdev)
int error;
int bits;
int i;
+ unsigned int poll_interval;
joy = devm_kzalloc(dev, sizeof(*joy), GFP_KERNEL);
if (!joy)
@@ -192,8 +208,25 @@ static int adc_joystick_probe(struct platform_device *pdev)
return error;
}
- /* Count how many channels we got. NULL terminated. */
+ error = device_property_read_u32(dev, "poll-interval", &poll_interval);
+ if (error) {
+ /* -EINVAL means the property is absent. */
+ if (error != -EINVAL)
+ return error;
+ } else if (poll_interval == 0) {
+ dev_err(dev, "Unable to get poll-interval\n");
+ return -EINVAL;
+ } else {
+ joy->polled = true;
+ }
+
+ /*
+ * Count how many channels we got. NULL terminated.
+ * Do not check the storage size if using polling.
+ */
for (i = 0; joy->chans[i].indio_dev; i++) {
+ if (joy->polled)
+ continue;
bits = joy->chans[i].channel->scan_type.storagebits;
if (!bits || bits > 16) {
dev_err(dev, "Unsupported channel storage size\n");
@@ -215,23 +248,31 @@ static int adc_joystick_probe(struct platform_device *pdev)
joy->input = input;
input->name = pdev->name;
input->id.bustype = BUS_HOST;
- input->open = adc_joystick_open;
- input->close = adc_joystick_close;
error = adc_joystick_set_axes(dev, joy);
if (error)
return error;
- joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle, joy);
- if (IS_ERR(joy->buffer)) {
- dev_err(dev, "Unable to allocate callback buffer\n");
- return PTR_ERR(joy->buffer);
- }
+ if (joy->polled) {
+ input_setup_polling(input, adc_joystick_poll);
+ input_set_poll_interval(input, poll_interval);
+ } else {
+ input->open = adc_joystick_open;
+ input->close = adc_joystick_close;
+
+ joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle,
+ joy);
+ if (IS_ERR(joy->buffer)) {
+ dev_err(dev, "Unable to allocate callback buffer\n");
+ return PTR_ERR(joy->buffer);
+ }
- error = devm_add_action_or_reset(dev, adc_joystick_cleanup, joy->buffer);
- if (error) {
- dev_err(dev, "Unable to add action\n");
- return error;
+ error = devm_add_action_or_reset(dev, adc_joystick_cleanup,
+ joy->buffer);
+ if (error) {
+ dev_err(dev, "Unable to add action\n");
+ return error;
+ }
}
input_set_drvdata(input, joy);
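A minimal devicetree fragment that would select the new polled mode (the "poll-interval" property name comes from the probe code above; the node layout and channel wiring are illustrative assumptions):

	joystick {
		compatible = "adc-joystick";
		io-channels = <&adc 0>, <&adc 1>;	/* X and Y axes */
		poll-interval = <20>;	/* in ms; omit the property for buffered mode */
	};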
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index e10d57bf1180..f1a720be458b 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -7,9 +7,6 @@
* Logitech ADI joystick family driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 12456a196dc7..3752dc2a2086 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -7,9 +7,6 @@
* Driver for Amiga joysticks for Linux/m68k
*/
-/*
- */
-
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3088c5b829f0..0c9e172a9818 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -7,9 +7,6 @@
* Analog joystick and gamepad driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 41e1936a847b..7ff78c9388bd 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -7,9 +7,6 @@
* Creative Labs Blaster GamePad Cobra driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 434d265fa2e8..4fba28b1a1e7 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -10,9 +10,6 @@
* Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index d37645e496ff..41d5dac05448 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -11,9 +11,6 @@
* Raphael Assenat
*/
-/*
- */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 920feba967f6..abefbd1484df 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -7,9 +7,6 @@
* Genius Flight 2000 joystick driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index fe798bc87950..0e86b269a90e 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -7,9 +7,6 @@
* Gravis/Kensington GrIP protocol joystick and gamepad driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 8eeacdb007c1..205eb6f8b84d 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -7,9 +7,6 @@
* Guillemot Digital Interface Protocol driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index ca22d84e5c84..03a9f0829f7e 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -10,9 +10,6 @@
* InterAct digital gamepad/joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index 70f63f9550e7..865652a7821d 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -8,9 +8,6 @@
* out of the joystick port into the syslog ...
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/gameport.h>
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index edb8e1982e26..017ef8c6170b 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -7,9 +7,6 @@
* Magellan and Space Mouse 6dof controller driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 8e9672deb1eb..7282301c3ae7 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -7,9 +7,6 @@
* Microsoft SideWinder joystick family driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index a85a4f33aea8..fa8ec533cd69 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -11,9 +11,6 @@
* SpaceTec SpaceBall 2003/3003/4000 FLX driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 557171483256..dbbc69f17c89 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -10,9 +10,6 @@
* SpaceTec SpaceOrb 360 and Avenger 6dof controller driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index c20425f52bd8..530de468cb61 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -8,9 +8,6 @@
* Gravis Stinger gamepad driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 7416de84b955..93562ecc0ca1 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -10,9 +10,6 @@
* ThrustMaster DirectConnect (BSP) joystick family driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index dfe7a2cacce2..dfb9c684651f 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -10,9 +10,6 @@
* TurboGraFX parallel port interface driver for Linux.
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/parport.h>
#include <linux/input.h>
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 174c69a188fb..9b6792ac27f1 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -32,9 +32,6 @@
* Arndt Schoenewald <arndt@quelltext.com>
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index 42bdbc28d95d..f66bddf145c2 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -7,9 +7,6 @@
* Logitech WingMan Warrior joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 18190b529bca..2959d80f7fdb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -61,6 +61,7 @@
* Later changes can be tracked in SCM.
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/rcupdate.h>
@@ -80,6 +81,9 @@
#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
#define MAP_STICKS_TO_NULL (1 << 2)
#define MAP_SELECT_BUTTON (1 << 3)
+#define MAP_PADDLES (1 << 4)
+#define MAP_PROFILE_BUTTON (1 << 5)
+
#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
@@ -89,6 +93,17 @@
#define XTYPE_XBOXONE 3
#define XTYPE_UNKNOWN 4
+/*
+ * Send a power-off packet to xpad360w when the mode button has been held
+ * for this many seconds.
+ */
+#define XPAD360W_POWEROFF_TIMEOUT 5
+
+#define PKT_XB 0
+#define PKT_XBE1 1
+#define PKT_XBE2_FW_OLD 2
+#define PKT_XBE2_FW_5_EARLY 3
+#define PKT_XBE2_FW_5_11 4
+
static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
@@ -111,8 +126,11 @@ static const struct xpad_device {
char *name;
u8 mapping;
u8 xtype;
+ u8 packet_type;
} xpad_device[] = {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -128,9 +146,11 @@ static const struct xpad_device {
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
- { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
+ { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
{ 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
@@ -244,6 +264,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -260,6 +281,7 @@ static const struct xpad_device {
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
@@ -325,6 +347,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
@@ -334,6 +357,14 @@ static const struct xpad_device {
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE },
+ { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -390,6 +421,13 @@ static const signed short xpad_abs_triggers[] = {
-1
};
+/* used when the controller has extra paddle buttons */
+static const signed short xpad_btn_paddles[] = {
+ BTN_TRIGGER_HAPPY5, BTN_TRIGGER_HAPPY6, /* paddle upper right, lower right */
+ BTN_TRIGGER_HAPPY7, BTN_TRIGGER_HAPPY8, /* paddle upper left, lower left */
+ -1 /* terminating entry */
+};
+
/*
* Xbox 360 has a vendor-specific class, so we cannot match it with only
* USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we
@@ -419,6 +457,7 @@ static const signed short xpad_abs_triggers[] = {
static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
+ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
@@ -429,6 +468,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */
+ XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
@@ -450,8 +490,12 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
+ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
{ }
};
@@ -473,13 +517,52 @@ struct xboxone_init_packet {
.len = ARRAY_SIZE(_data), \
}
+/*
+ * Starting with the Xbox One, the GIP (Game Input Protocol) is used.
+ * Magic numbers are taken from
+ * - https://github.com/xpadneo/gip-dissector/blob/main/src/gip-dissector.lua
+ * - https://github.com/medusalix/xone/blob/master/bus/protocol.c
+ */
+#define GIP_CMD_ACK 0x01
+#define GIP_CMD_IDENTIFY 0x04
+#define GIP_CMD_POWER 0x05
+#define GIP_CMD_AUTHENTICATE 0x06
+#define GIP_CMD_VIRTUAL_KEY 0x07
+#define GIP_CMD_RUMBLE 0x09
+#define GIP_CMD_LED 0x0a
+#define GIP_CMD_FIRMWARE 0x0c
+#define GIP_CMD_INPUT 0x20
+
+#define GIP_SEQ0 0x00
+
+#define GIP_OPT_ACK 0x10
+#define GIP_OPT_INTERNAL 0x20
+
+/*
+ * Length of the command payload, encoded as LEB128
+ * (https://en.wikipedia.org/wiki/LEB128), which is a no-op for N < 128.
+ */
+#define GIP_PL_LEN(N) (N)
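For reference, a sketch of an unsigned LEB128 encoder (illustrative only; no such helper exists in this driver) shows why GIP_PL_LEN() can be the identity for the short payloads used here:

	/* Encode n as unsigned LEB128 into buf; returns the bytes written. */
	static int uleb128_encode(unsigned int n, u8 *buf)
	{
		int i = 0;

		do {
			u8 byte = n & 0x7f;

			n >>= 7;
			if (n)
				byte |= 0x80;	/* continuation bit */
			buf[i++] = byte;
		} while (n);

		return i;
	}

For n < 128 this emits the single byte n, so GIP_PL_LEN(N) == N holds for every packet defined below.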
+
+/*
+ * payload specific defines
+ */
+#define GIP_PWR_ON 0x00
+#define GIP_LED_ON 0x01
+
+#define GIP_MOTOR_R BIT(0)
+#define GIP_MOTOR_L BIT(1)
+#define GIP_MOTOR_RT BIT(2)
+#define GIP_MOTOR_LT BIT(3)
+#define GIP_MOTOR_ALL (GIP_MOTOR_R | GIP_MOTOR_L | GIP_MOTOR_RT | GIP_MOTOR_LT)
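Putting the defines together, the init packets below all begin with a common four-byte header; this struct illustrates the implied layout (the field names are assumptions, not taken from the driver):

	struct gip_header {
		u8 command;	/* GIP_CMD_* */
		u8 options;	/* GIP_OPT_* flags, 0 for external commands */
		u8 sequence;	/* GIP_SEQ0 for one-shot init packets */
		u8 length;	/* LEB128 payload length, GIP_PL_LEN() */
	};			/* followed by 'length' payload bytes */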
/*
* This packet is required for all Xbox One pads with 2015
* or later firmware installed (or present from the factory).
*/
-static const u8 xboxone_fw2015_init[] = {
- 0x05, 0x20, 0x00, 0x01, 0x00
+static const u8 xboxone_power_on[] = {
+ GIP_CMD_POWER, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(1), GIP_PWR_ON
};
/*
@@ -489,7 +572,16 @@ static const u8 xboxone_fw2015_init[] = {
* Bluetooth mode.
*/
static const u8 xboxone_s_init[] = {
- 0x05, 0x20, 0x00, 0x0f, 0x06
+ GIP_CMD_POWER, GIP_OPT_INTERNAL, GIP_SEQ0, 0x0f, 0x06
+};
+
+/*
+ * This packet is required to get additional input data
+ * from Xbox One Elite Series 2 (0x045e:0x0b00) pads.
+ * We currently do this mainly to get paddle data.
+ */
+static const u8 extra_input_packet_init[] = {
+ 0x4d, 0x10, 0x01, 0x02, 0x07, 0x00
};
/*
@@ -497,9 +589,9 @@ static const u8 xboxone_s_init[] = {
* (0x0e6f:0x0165) to finish initialization and for Hori pads
* (0x0f0d:0x0067) to make the analog sticks work.
*/
-static const u8 xboxone_hori_init[] = {
- 0x01, 0x20, 0x00, 0x09, 0x00, 0x04, 0x20, 0x3a,
- 0x00, 0x00, 0x00, 0x80, 0x00
+static const u8 xboxone_hori_ack_id[] = {
+ GIP_CMD_ACK, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_CMD_IDENTIFY, GIP_OPT_INTERNAL, 0x3a, 0x00, 0x00, 0x00, 0x80, 0x00
};
/*
@@ -507,8 +599,8 @@ static const u8 xboxone_hori_init[] = {
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_init1[] = {
- 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+static const u8 xboxone_pdp_led_on[] = {
+ GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
};
/*
@@ -516,8 +608,8 @@ static const u8 xboxone_pdp_init1[] = {
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_init2[] = {
- 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+static const u8 xboxone_pdp_auth[] = {
+ GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00
};
/*
@@ -525,8 +617,8 @@ static const u8 xboxone_pdp_init2[] = {
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
static const u8 xboxone_rumblebegin_init[] = {
- 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
- 0x1D, 0x1D, 0xFF, 0x00, 0x00
+ GIP_CMD_RUMBLE, 0x00, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_MOTOR_ALL, 0x00, 0x00, 0x1D, 0x1D, 0xFF, 0x00, 0x00
};
/*
@@ -536,8 +628,8 @@ static const u8 xboxone_rumblebegin_init[] = {
* spin up to enough speed to actually vibrate the gamepad.
*/
static const u8 xboxone_rumbleend_init[] = {
- 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00
+ GIP_CMD_RUMBLE, 0x00, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_MOTOR_ALL, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/*
@@ -547,13 +639,14 @@ static const u8 xboxone_rumbleend_init[] = {
* packet is going to be sent.
*/
static const struct xboxone_init_packet xboxone_init_packets[] = {
- XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
- XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
- XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_ack_id),
+ XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_ack_id),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_power_on),
XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
@@ -608,14 +701,17 @@ struct usb_xpad {
int mapping; /* map d-pad to buttons or to axes */
int xtype; /* type of xbox device */
+ int packet_type; /* type of the extended packet */
int pad_nr; /* the order x360 pads were attached */
const char *name; /* name of the device */
struct work_struct work; /* init/remove device from callback */
+ time64_t mode_btn_down_ts;
};
static int xpad_init_input(struct usb_xpad *xpad);
static void xpad_deinit_input(struct usb_xpad *xpad);
static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
+static void xpad360w_poweroff_controller(struct usb_xpad *xpad);
/*
* xpad_process_packet
@@ -656,10 +752,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
+ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
} else {
input_report_abs(dev, ABS_HAT0X,
!!(data[2] & 0x08) - !!(data[2] & 0x04));
@@ -668,10 +764,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
}
/* start/back buttons and stick press left/right */
- input_report_key(dev, BTN_START, data[2] & 0x10);
- input_report_key(dev, BTN_SELECT, data[2] & 0x20);
- input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
- input_report_key(dev, BTN_THUMBR, data[2] & 0x80);
+ input_report_key(dev, BTN_START, data[2] & BIT(4));
+ input_report_key(dev, BTN_SELECT, data[2] & BIT(5));
+ input_report_key(dev, BTN_THUMBL, data[2] & BIT(6));
+ input_report_key(dev, BTN_THUMBR, data[2] & BIT(7));
/* "analog" buttons A, B, X, Y */
input_report_key(dev, BTN_A, data[4]);
@@ -683,6 +779,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
input_report_key(dev, BTN_C, data[8]);
input_report_key(dev, BTN_Z, data[9]);
+ /* Profile button has a value of 0-3, so it is reported as an axis */
+ if (xpad->mapping & MAP_PROFILE_BUTTON)
+ input_report_abs(dev, ABS_PROFILE, data[34]);
+
input_sync(dev);
}
@@ -706,10 +806,10 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
+ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
}
/*
@@ -727,21 +827,21 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
}
/* start/back buttons */
- input_report_key(dev, BTN_START, data[2] & 0x10);
- input_report_key(dev, BTN_SELECT, data[2] & 0x20);
+ input_report_key(dev, BTN_START, data[2] & BIT(4));
+ input_report_key(dev, BTN_SELECT, data[2] & BIT(5));
/* stick press left/right */
- input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
- input_report_key(dev, BTN_THUMBR, data[2] & 0x80);
+ input_report_key(dev, BTN_THUMBL, data[2] & BIT(6));
+ input_report_key(dev, BTN_THUMBR, data[2] & BIT(7));
/* buttons A,B,X,Y,TL,TR and MODE */
- input_report_key(dev, BTN_A, data[3] & 0x10);
- input_report_key(dev, BTN_B, data[3] & 0x20);
- input_report_key(dev, BTN_X, data[3] & 0x40);
- input_report_key(dev, BTN_Y, data[3] & 0x80);
- input_report_key(dev, BTN_TL, data[3] & 0x01);
- input_report_key(dev, BTN_TR, data[3] & 0x02);
- input_report_key(dev, BTN_MODE, data[3] & 0x04);
+ input_report_key(dev, BTN_A, data[3] & BIT(4));
+ input_report_key(dev, BTN_B, data[3] & BIT(5));
+ input_report_key(dev, BTN_X, data[3] & BIT(6));
+ input_report_key(dev, BTN_Y, data[3] & BIT(7));
+ input_report_key(dev, BTN_TL, data[3] & BIT(0));
+ input_report_key(dev, BTN_TR, data[3] & BIT(1));
+ input_report_key(dev, BTN_MODE, data[3] & BIT(2));
if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
/* left stick */
@@ -767,6 +867,23 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
}
input_sync(dev);
+
+ /* XBOX360W controllers can't be turned off without driver assistance */
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ if (xpad->mode_btn_down_ts > 0 && xpad->pad_present &&
+ ((ktime_get_seconds() - xpad->mode_btn_down_ts) >=
+ XPAD360W_POWEROFF_TIMEOUT)) {
+ xpad360w_poweroff_controller(xpad);
+ xpad->mode_btn_down_ts = 0;
+ return;
+ }
+
+ /* mode button down/up */
+ if (data[3] & BIT(2))
+ xpad->mode_btn_down_ts = ktime_get_seconds();
+ else
+ xpad->mode_btn_down_ts = 0;
+ }
}
static void xpad_presence_work(struct work_struct *work)
@@ -846,87 +963,154 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
{
struct input_dev *dev = xpad->dev;
+ bool do_sync = false;
/* the xbox button has its own special report */
- if (data[0] == 0X07) {
+ if (data[0] == GIP_CMD_VIRTUAL_KEY) {
/*
* The Xbox One S controller requires these reports to be
* acked otherwise it continues sending them forever and
* won't report further mode button events.
*/
- if (data[1] == 0x30)
+ if (data[1] == (GIP_OPT_ACK | GIP_OPT_INTERNAL))
xpadone_ack_mode_report(xpad, data[2]);
- input_report_key(dev, BTN_MODE, data[4] & 0x01);
+ input_report_key(dev, BTN_MODE, data[4] & GENMASK(1, 0));
input_sync(dev);
- return;
- }
- /* check invalid packet */
- else if (data[0] != 0X20)
- return;
- /* menu/view buttons */
- input_report_key(dev, BTN_START, data[4] & 0x04);
- input_report_key(dev, BTN_SELECT, data[4] & 0x08);
- if (xpad->mapping & MAP_SELECT_BUTTON)
- input_report_key(dev, KEY_RECORD, data[22] & 0x01);
+ do_sync = true;
+ } else if (data[0] == GIP_CMD_FIRMWARE) {
+ /* Some packet formats force us to use this separate packet to poll paddle inputs */
+ if (xpad->packet_type == PKT_XBE2_FW_5_11) {
+ /* Mute paddles if controller is in a custom profile slot
+ * Checked by looking at the active profile slot to
+ * verify it's the default slot
+ */
+ if (data[19] != 0)
+ data[18] = 0;
- /* buttons A,B,X,Y */
- input_report_key(dev, BTN_A, data[4] & 0x10);
- input_report_key(dev, BTN_B, data[4] & 0x20);
- input_report_key(dev, BTN_X, data[4] & 0x40);
- input_report_key(dev, BTN_Y, data[4] & 0x80);
+ /* Elite Series 2 split packet paddle bits */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
- /* digital pad */
- if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
- /* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & 0x04);
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & 0x08);
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & 0x01);
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & 0x02);
- } else {
- input_report_abs(dev, ABS_HAT0X,
- !!(data[5] & 0x08) - !!(data[5] & 0x04));
- input_report_abs(dev, ABS_HAT0Y,
- !!(data[5] & 0x02) - !!(data[5] & 0x01));
- }
-
- /* TL/TR */
- input_report_key(dev, BTN_TL, data[5] & 0x10);
- input_report_key(dev, BTN_TR, data[5] & 0x20);
+ do_sync = true;
+ }
+ } else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */
+ /* menu/view buttons */
+ input_report_key(dev, BTN_START, data[4] & BIT(2));
+ input_report_key(dev, BTN_SELECT, data[4] & BIT(3));
+ if (xpad->mapping & MAP_SELECT_BUTTON)
+ input_report_key(dev, KEY_RECORD, data[22] & BIT(0));
+
+ /* buttons A,B,X,Y */
+ input_report_key(dev, BTN_A, data[4] & BIT(4));
+ input_report_key(dev, BTN_B, data[4] & BIT(5));
+ input_report_key(dev, BTN_X, data[4] & BIT(6));
+ input_report_key(dev, BTN_Y, data[4] & BIT(7));
+
+ /* digital pad */
+ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
+ /* dpad as buttons (left, right, up, down) */
+ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & BIT(1));
+ } else {
+ input_report_abs(dev, ABS_HAT0X,
+ !!(data[5] & 0x08) - !!(data[5] & 0x04));
+ input_report_abs(dev, ABS_HAT0Y,
+ !!(data[5] & 0x02) - !!(data[5] & 0x01));
+ }
- /* stick press left/right */
- input_report_key(dev, BTN_THUMBL, data[5] & 0x40);
- input_report_key(dev, BTN_THUMBR, data[5] & 0x80);
+ /* TL/TR */
+ input_report_key(dev, BTN_TL, data[5] & BIT(4));
+ input_report_key(dev, BTN_TR, data[5] & BIT(5));
+
+ /* stick press left/right */
+ input_report_key(dev, BTN_THUMBL, data[5] & BIT(6));
+ input_report_key(dev, BTN_THUMBR, data[5] & BIT(7));
+
+ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
+ /* left stick */
+ input_report_abs(dev, ABS_X,
+ (__s16) le16_to_cpup((__le16 *)(data + 10)));
+ input_report_abs(dev, ABS_Y,
+ ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
+
+ /* right stick */
+ input_report_abs(dev, ABS_RX,
+ (__s16) le16_to_cpup((__le16 *)(data + 14)));
+ input_report_abs(dev, ABS_RY,
+ ~(__s16) le16_to_cpup((__le16 *)(data + 16)));
+ }
- if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
- /* left stick */
- input_report_abs(dev, ABS_X,
- (__s16) le16_to_cpup((__le16 *)(data + 10)));
- input_report_abs(dev, ABS_Y,
- ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
+ /* triggers left/right */
+ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+ input_report_key(dev, BTN_TL2,
+ (__u16) le16_to_cpup((__le16 *)(data + 6)));
+ input_report_key(dev, BTN_TR2,
+ (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ } else {
+ input_report_abs(dev, ABS_Z,
+ (__u16) le16_to_cpup((__le16 *)(data + 6)));
+ input_report_abs(dev, ABS_RZ,
+ (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ }
- /* right stick */
- input_report_abs(dev, ABS_RX,
- (__s16) le16_to_cpup((__le16 *)(data + 14)));
- input_report_abs(dev, ABS_RY,
- ~(__s16) le16_to_cpup((__le16 *)(data + 16)));
- }
+ /* paddle handling */
+ /* based on SDL's SDL_hidapi_xboxone.c */
+ if (xpad->mapping & MAP_PADDLES) {
+ if (xpad->packet_type == PKT_XBE1) {
+ /* Mute paddles if controller has a custom mapping applied.
+ * Checked by comparing the current mapping
+ * config against the factory mapping config
+ */
+ if (memcmp(&data[4], &data[18], 2) != 0)
+ data[32] = 0;
+
+ /* OG Elite Series Controller paddle bits */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[32] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[32] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[32] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[32] & BIT(2));
+ } else if (xpad->packet_type == PKT_XBE2_FW_OLD) {
+ /* Mute paddles if controller has a custom mapping applied.
+ * Checked by comparing the current mapping
+ * config against the factory mapping config
+ */
+ if (data[19] != 0)
+ data[18] = 0;
+
+ /* Elite Series 2 4.x firmware paddle bits */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
+ } else if (xpad->packet_type == PKT_XBE2_FW_5_EARLY) {
+ /* Mute paddles if controller has a custom mapping applied.
+ * Checked by comparing the current mapping
+ * config against the factory mapping config
+ */
+ if (data[23] != 0)
+ data[22] = 0;
+
+ /* Elite Series 2 5.x firmware paddle bits
+ * (before the packet was split)
+ */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[22] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[22] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[22] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[22] & BIT(3));
+ }
+ }
- /* triggers left/right */
- if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
- input_report_key(dev, BTN_TL2,
- (__u16) le16_to_cpup((__le16 *)(data + 6)));
- input_report_key(dev, BTN_TR2,
- (__u16) le16_to_cpup((__le16 *)(data + 8)));
- } else {
- input_report_abs(dev, ABS_Z,
- (__u16) le16_to_cpup((__le16 *)(data + 6)));
- input_report_abs(dev, ABS_RZ,
- (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ do_sync = true;
}
- input_sync(dev);
+ if (do_sync)
+ input_sync(dev);
}
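
Note the ~(__s16) cast on the Y axes above: for a two's-complement 16-bit sample, ~v equals -v - 1, so the axis is flipped while 32767 maps to -32768 and -32768 maps to 32767; plain negation would overflow at -32768. A standalone demo of the arithmetic (not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Shows the ABS_Y/ABS_RY flip: ~v == -v - 1 in two's complement. */
	int main(void)
	{
		int16_t samples[] = { -32768, -1, 0, 1, 32767 };
		size_t i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("%6d -> %6d\n", samples[i], (int16_t)~samples[i]);
		return 0;
	}
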
static void xpad_irq_in(struct urb *urb)
@@ -1226,8 +1410,8 @@ static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num)
struct xpad_output_packet *packet =
&xpad->out_packets[XPAD_OUT_CMD_IDX];
static const u8 mode_report_ack[] = {
- 0x01, 0x20, 0x00, 0x09, 0x00, 0x07, 0x20, 0x02,
- 0x00, 0x00, 0x00, 0x00, 0x00
+ GIP_CMD_ACK, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_CMD_VIRTUAL_KEY, GIP_OPT_INTERNAL, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00
};
spin_lock_irqsave(&xpad->odata_lock, flags);
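
Comparing the raw bytes removed above with their new symbolic names pins down the GIP header layout this ack encodes: command GIP_CMD_ACK (0x01), options GIP_OPT_INTERNAL (0x20, with GIP_OPT_ACK being 0x10), sequence GIP_SEQ0 (0x00) and payload length GIP_PL_LEN(9) (0x09), followed by the acked command GIP_CMD_VIRTUAL_KEY (0x07). A descriptive sketch, inferred from the replaced constants rather than from any public GIP specification:

	/*
	 * Inferred GIP frame header; the values are deduced from the raw
	 * bytes this hunk replaces, not from an official specification.
	 */
	struct gip_frame_header {
		u8 command;	/* GIP_CMD_*, e.g. GIP_CMD_ACK == 0x01 */
		u8 options;	/* GIP_OPT_* flags, e.g. GIP_OPT_INTERNAL == 0x20 */
		u8 sequence;	/* starts at GIP_SEQ0 == 0x00 */
		u8 length;	/* GIP_PL_LEN(n): payload byte count */
	};
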
@@ -1305,14 +1489,14 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
break;
case XTYPE_XBOXONE:
- packet->data[0] = 0x09; /* activate rumble */
+ packet->data[0] = GIP_CMD_RUMBLE; /* activate rumble */
packet->data[1] = 0x00;
packet->data[2] = xpad->odata_serial++;
- packet->data[3] = 0x09;
+ packet->data[3] = GIP_PL_LEN(9);
packet->data[4] = 0x00;
- packet->data[5] = 0x0F;
- packet->data[6] = 0x00;
- packet->data[7] = 0x00;
+ packet->data[5] = GIP_MOTOR_ALL;
+ packet->data[6] = 0x00; /* left trigger */
+ packet->data[7] = 0x00; /* right trigger */
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
packet->data[10] = 0xFF; /* on period */
@@ -1622,6 +1806,9 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes) */
input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
break;
+ case ABS_PROFILE: /* 4 value profile button (such as on XAC) */
+ input_set_abs_params(input_dev, abs, 0, 4, 0, 0);
+ break;
default:
input_set_abs_params(input_dev, abs, 0, 0, 0, 0);
break;
@@ -1693,6 +1880,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad_btn_pad[i]);
}
+ /* set up paddles if the controller has them */
+ if (xpad->mapping & MAP_PADDLES) {
+ for (i = 0; xpad_btn_paddles[i] >= 0; i++)
+ input_set_capability(input_dev, EV_KEY, xpad_btn_paddles[i]);
+ }
+
/*
* This should be a simple else block. However historically
* xbox360w has mapped DPAD to buttons while xbox360 did not. This
@@ -1714,6 +1907,10 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
}
+ /* setup profile button as an axis with 4 possible values */
+ if (xpad->mapping & MAP_PROFILE_BUTTON)
+ xpad_set_up_abs(input_dev, ABS_PROFILE);
+
error = xpad_init_ff(xpad);
if (error)
goto err_free_input;
@@ -1779,6 +1976,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
xpad->name = xpad_device[i].name;
+ xpad->packet_type = PKT_XB;
INIT_WORK(&xpad->work, xpad_presence_work);
if (xpad->xtype == XTYPE_UNKNOWN) {
@@ -1844,6 +2042,38 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
usb_set_intfdata(intf, xpad);
+ /* Packet type detection */
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x045e) { /* Microsoft controllers */
+ if (le16_to_cpu(udev->descriptor.idProduct) == 0x02e3) {
+ /* The original elite controller always uses the oldest
+ * type of extended packet
+ */
+ xpad->packet_type = PKT_XBE1;
+ } else if (le16_to_cpu(udev->descriptor.idProduct) == 0x0b00) {
+ /* The elite 2 controller has seen multiple packet
+ * revisions. These are tied to specific firmware
+ * versions
+ */
+ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x0500) {
+ /* This is the format that the Elite 2 used
+ * prior to the BLE update
+ */
+ xpad->packet_type = PKT_XBE2_FW_OLD;
+ } else if (le16_to_cpu(udev->descriptor.bcdDevice) <
+ 0x050b) {
+ /* This is the format that the Elite 2 used
+ * prior to the update that split the packet
+ */
+ xpad->packet_type = PKT_XBE2_FW_5_EARLY;
+ } else {
+ /* The split packet format that was introduced
+ * in firmware v5.11
+ */
+ xpad->packet_type = PKT_XBE2_FW_5_11;
+ }
+ }
+ }
+
if (xpad->xtype == XTYPE_XBOX360W) {
/*
* Submit the int URB immediately rather than waiting for open
@@ -1972,7 +2202,6 @@ static struct usb_driver xpad_driver = {
.disconnect = xpad_disconnect,
.suspend = xpad_suspend,
.resume = xpad_resume,
- .reset_resume = xpad_resume,
.id_table = xpad_table,
};
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index d5531179b01f..3f2460e2b095 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -28,9 +28,6 @@
* coder :-(
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a20ee693b22b..00292118b79b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -40,6 +40,9 @@ config KEYBOARD_ADP5520
config KEYBOARD_ADP5588
tristate "ADP5588/87 I2C QWERTY Keypad and IO Expander"
depends on I2C
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use an ADP5588/87 attached to your
system I2C bus.
@@ -186,7 +189,7 @@ config KEYBOARD_QT2160
config KEYBOARD_CLPS711X
tristate "CLPS711X Keypad support"
- depends on OF_GPIO && (ARCH_CLPS711X || COMPILE_TEST)
+ depends on ARCH_CLPS711X || COMPILE_TEST
select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus Logic
@@ -524,6 +527,19 @@ config KEYBOARD_OPENCORES
To compile this driver as a module, choose M here; the
module will be called opencores-kbd.
+config KEYBOARD_PINEPHONE
+ tristate "Pine64 PinePhone Keyboard"
+ depends on I2C && REGULATOR
+ select CRC8
+ select INPUT_MATRIXKMAP
+ help
+ Say Y here to enable support for the keyboard in the Pine64 PinePhone
+ keyboard case. This driver supports the FLOSS firmware available at
+ https://megous.com/git/pinephone-keyboard/
+
+ To compile this driver as a module, choose M here; the
+ module will be called pinephone-keyboard.
+
config KEYBOARD_PXA27x
tristate "PXA27x/PXA3xx keypad support"
depends on PXA27x || PXA3xx || ARCH_MMP
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 721936e90290..5f67196bb2c1 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_KEYBOARD_NSPIRE) += nspire-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
+obj-$(CONFIG_KEYBOARD_PINEPHONE) += pinephone-keyboard.o
obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index e2719737360a..7cd83c8e7110 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -8,27 +8,163 @@
* Copyright (C) 2008-2010 Analog Devices Inc.
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ktime.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
-#include <linux/platform_data/adp5588.h>
+#define DEV_ID 0x00 /* Device ID */
+#define CFG 0x01 /* Configuration Register1 */
+#define INT_STAT 0x02 /* Interrupt Status Register */
+#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
+#define KEY_EVENTA 0x04 /* Key Event Register A */
+#define KEY_EVENTB 0x05 /* Key Event Register B */
+#define KEY_EVENTC 0x06 /* Key Event Register C */
+#define KEY_EVENTD 0x07 /* Key Event Register D */
+#define KEY_EVENTE 0x08 /* Key Event Register E */
+#define KEY_EVENTF 0x09 /* Key Event Register F */
+#define KEY_EVENTG 0x0A /* Key Event Register G */
+#define KEY_EVENTH 0x0B /* Key Event Register H */
+#define KEY_EVENTI 0x0C /* Key Event Register I */
+#define KEY_EVENTJ 0x0D /* Key Event Register J */
+#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
+#define UNLOCK1 0x0F /* Unlock Key1 */
+#define UNLOCK2 0x10 /* Unlock Key2 */
+#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
+#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
+#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
+#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
+#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
+#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
+#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
+#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
+#define GPI_EM1 0x20 /* GPI Event Mode 1 */
+#define GPI_EM2 0x21 /* GPI Event Mode 2 */
+#define GPI_EM3 0x22 /* GPI Event Mode 3 */
+#define GPIO_DIR1 0x23 /* GPIO Data Direction */
+#define GPIO_DIR2 0x24 /* GPIO Data Direction */
+#define GPIO_DIR3 0x25 /* GPIO Data Direction */
+#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
+#define DEBOUNCE_DIS1 0x29 /* Debounce Disable */
+#define DEBOUNCE_DIS2 0x2A /* Debounce Disable */
+#define DEBOUNCE_DIS3 0x2B /* Debounce Disable */
+#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
+#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
+#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
+#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
+#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
+#define CMP_CONFG_SENS2 0x32 /* Sensor2 Comparator Configuration Register */
+#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
+#define CMP1_LVL2_HYS 0x34 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
+#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Reference Level, Output Falling for Sensor 1 */
+#define CMP1_LVL3_HYS 0x36 /* L3 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
+#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
+#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
+#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
+#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
+#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
+#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
+#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
+#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
+
+#define ADP5588_DEVICE_ID_MASK 0xF
+
+ /* Configuration Register1 */
+#define ADP5588_AUTO_INC BIT(7)
+#define ADP5588_GPIEM_CFG BIT(6)
+#define ADP5588_OVR_FLOW_M BIT(5)
+#define ADP5588_INT_CFG BIT(4)
+#define ADP5588_OVR_FLOW_IEN BIT(3)
+#define ADP5588_K_LCK_IM BIT(2)
+#define ADP5588_GPI_IEN BIT(1)
+#define ADP5588_KE_IEN BIT(0)
+
+/* Interrupt Status Register */
+#define ADP5588_CMP2_INT BIT(5)
+#define ADP5588_CMP1_INT BIT(4)
+#define ADP5588_OVR_FLOW_INT BIT(3)
+#define ADP5588_K_LCK_INT BIT(2)
+#define ADP5588_GPI_INT BIT(1)
+#define ADP5588_KE_INT BIT(0)
+
+/* Key Lock and Event Counter Register */
+#define ADP5588_K_LCK_EN BIT(6)
+#define ADP5588_LCK21 0x30
+#define ADP5588_KEC GENMASK(3, 0)
+
+#define ADP5588_MAXGPIO 18
+#define ADP5588_BANK(offs) ((offs) >> 3)
+#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
+
+/*
+ * 128 so it fits matrix-keymap maximum number of keys when the full
+ * 10cols * 8rows are used.
+ */
+#define ADP5588_KEYMAPSIZE 128
+
+#define GPI_PIN_ROW0 97
+#define GPI_PIN_ROW1 98
+#define GPI_PIN_ROW2 99
+#define GPI_PIN_ROW3 100
+#define GPI_PIN_ROW4 101
+#define GPI_PIN_ROW5 102
+#define GPI_PIN_ROW6 103
+#define GPI_PIN_ROW7 104
+#define GPI_PIN_COL0 105
+#define GPI_PIN_COL1 106
+#define GPI_PIN_COL2 107
+#define GPI_PIN_COL3 108
+#define GPI_PIN_COL4 109
+#define GPI_PIN_COL5 110
+#define GPI_PIN_COL6 111
+#define GPI_PIN_COL7 112
+#define GPI_PIN_COL8 113
+#define GPI_PIN_COL9 114
+
+#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
+#define GPI_PIN_ROW_END GPI_PIN_ROW7
+#define GPI_PIN_COL_BASE GPI_PIN_COL0
+#define GPI_PIN_COL_END GPI_PIN_COL9
+
+#define GPI_PIN_BASE GPI_PIN_ROW_BASE
+#define GPI_PIN_END GPI_PIN_COL_END
+
+#define ADP5588_ROWS_MAX (GPI_PIN_ROW7 - GPI_PIN_ROW0 + 1)
+#define ADP5588_COLS_MAX (GPI_PIN_COL9 - GPI_PIN_COL0 + 1)
+
+#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
/* Key Event Register xy */
-#define KEY_EV_PRESSED (1 << 7)
-#define KEY_EV_MASK (0x7F)
+#define KEY_EV_PRESSED BIT(7)
+#define KEY_EV_MASK GENMASK(6, 0)
-#define KP_SEL(x) (0xFFFF >> (16 - x)) /* 2^x-1 */
+#define KP_SEL(x) (BIT(x) - 1) /* 2^x-1 */
#define KEYP_MAX_EVENT 10
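
KP_SEL(x) builds a mask of the x lowest bits; adp5588_setup() later splits the column mask across KP_GPIO2 (low byte) and KP_GPIO3 (high bits). A standalone demo of the arithmetic for the full 8x10 matrix (not part of the patch):

	#include <stdio.h>

	#define KP_SEL(x) ((1u << (x)) - 1) /* 2^x - 1 */

	int main(void)
	{
		unsigned int rows = 8, cols = 10;

		printf("KP_GPIO1 = 0x%02x\n", KP_SEL(rows));        /* 0xff */
		printf("KP_GPIO2 = 0x%02x\n", KP_SEL(cols) & 0xFF); /* 0xff */
		printf("KP_GPIO3 = 0x%02x\n", KP_SEL(cols) >> 8);   /* 0x03 */
		return 0;
	}
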
@@ -40,21 +176,27 @@
#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
#define WA_DELAYED_READOUT_TIME 25
+#define ADP5588_INVALID_HWIRQ (~0UL)
+
struct adp5588_kpad {
struct i2c_client *client;
struct input_dev *input;
ktime_t irq_time;
unsigned long delay;
+ u32 row_shift;
+ u32 rows;
+ u32 cols;
+ u32 unlock_keys[2];
+ int nkeys_unlock;
unsigned short keycode[ADP5588_KEYMAPSIZE];
- const struct adp5588_gpi_map *gpimap;
- unsigned short gpimapsize;
-#ifdef CONFIG_GPIOLIB
unsigned char gpiomap[ADP5588_MAXGPIO];
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
u8 dat_out[3];
u8 dir[3];
-#endif
+ u8 int_en[3];
+ u8 irq_mask[3];
+ u8 pull_dis[3];
};
static int adp5588_read(struct i2c_client *client, u8 reg)
@@ -72,8 +214,7 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
return i2c_smbus_write_byte_data(client, reg, val);
}
-#ifdef CONFIG_GPIOLIB
-static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
+static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned int off)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -93,7 +234,7 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
+ unsigned int off, int val)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -106,13 +247,47 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
- kpad->dat_out[bank]);
+ adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, kpad->dat_out[bank]);
mutex_unlock(&kpad->gpio_lock);
}
-static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
+static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
+ unsigned long config)
+{
+ struct adp5588_kpad *kpad = gpiochip_get_data(chip);
+ unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+ unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+ bool pull_disable;
+ int ret;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pull_disable = false;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ pull_disable = true;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ mutex_lock(&kpad->gpio_lock);
+
+ if (pull_disable)
+ kpad->pull_dis[bank] |= bit;
+ else
+ kpad->pull_dis[bank] &= ~bit;
+
+ ret = adp5588_write(kpad->client, GPIO_PULL1 + bank,
+ kpad->pull_dis[bank]);
+
+ mutex_unlock(&kpad->gpio_lock);
+
+ return ret;
+}
+
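
A hypothetical consumer-side sketch of how the set_config() hook above gets exercised; gpiod_set_config() and pinconf_to_config_packed() are existing kernel APIs, while the demo_ helper is made up:

	#include <linux/gpio/consumer.h>
	#include <linux/pinctrl/pinconf-generic.h>

	/* Hypothetical: drop the internal pull-up on one exported expander pin. */
	static int demo_disable_pull(struct gpio_desc *desc)
	{
		return gpiod_set_config(desc,
				pinconf_to_config_packed(PIN_CONFIG_BIAS_DISABLE, 0));
	}
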
+static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -130,7 +305,7 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
}
static int adp5588_gpio_direction_output(struct gpio_chip *chip,
- unsigned off, int val)
+ unsigned int off, int val)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -147,17 +322,19 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
kpad->dat_out[bank] &= ~bit;
ret = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
- kpad->dat_out[bank]);
- ret |= adp5588_write(kpad->client, GPIO_DIR1 + bank,
- kpad->dir[bank]);
+ kpad->dat_out[bank]);
+ if (ret)
+ goto out_unlock;
+
+ ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
+out_unlock:
mutex_unlock(&kpad->gpio_lock);
return ret;
}
-static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
- const struct adp5588_kpad_platform_data *pdata)
+static int adp5588_build_gpiomap(struct adp5588_kpad *kpad)
{
bool pin_used[ADP5588_MAXGPIO];
int n_unused = 0;
@@ -165,15 +342,12 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
memset(pin_used, 0, sizeof(pin_used));
- for (i = 0; i < pdata->rows; i++)
+ for (i = 0; i < kpad->rows; i++)
pin_used[i] = true;
- for (i = 0; i < pdata->cols; i++)
+ for (i = 0; i < kpad->cols; i++)
pin_used[i + GPI_PIN_COL_BASE - GPI_PIN_BASE] = true;
- for (i = 0; i < kpad->gpimapsize; i++)
- pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
-
for (i = 0; i < ADP5588_MAXGPIO; i++)
if (!pin_used[i])
kpad->gpiomap[n_unused++] = i;
@@ -181,47 +355,101 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
-static void adp5588_gpio_do_teardown(void *_kpad)
+static void adp5588_irq_bus_lock(struct irq_data *d)
{
- struct adp5588_kpad *kpad = _kpad;
- struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
- int error;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
- error = gpio_data->teardown(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
- if (error)
- dev_warn(&kpad->client->dev, "teardown failed %d\n", error);
+ mutex_lock(&kpad->gpio_lock);
}
+static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
+ int i;
+
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+ if (kpad->int_en[i] ^ kpad->irq_mask[i]) {
+ kpad->int_en[i] = kpad->irq_mask[i];
+ adp5588_write(kpad->client, GPI_EM1 + i, kpad->int_en[i]);
+ }
+ }
+
+ mutex_unlock(&kpad->gpio_lock);
+}
+
+static void adp5588_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long real_irq = kpad->gpiomap[hwirq];
+
+ kpad->irq_mask[ADP5588_BANK(real_irq)] &= ~ADP5588_BIT(real_irq);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void adp5588_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long real_irq = kpad->gpiomap[hwirq];
+
+ gpiochip_enable_irq(gc, hwirq);
+ kpad->irq_mask[ADP5588_BANK(real_irq)] |= ADP5588_BIT(real_irq);
+}
+
+static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_chip adp5588_irq_chip = {
+ .name = "adp5588",
+ .irq_mask = adp5588_irq_mask,
+ .irq_unmask = adp5588_irq_unmask,
+ .irq_bus_lock = adp5588_irq_bus_lock,
+ .irq_bus_sync_unlock = adp5588_irq_bus_sync_unlock,
+ .irq_set_type = adp5588_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
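
This is the usual shape of an irqchip behind a sleeping bus: irq_mask/irq_unmask only touch the cached irq_mask[] under irq_bus_lock(), and the actual I2C writes happen in irq_bus_sync_unlock() where sleeping is allowed. A hypothetical consumer sketch (the demo_ names are made up; the APIs are standard):

	#include <linux/gpio/consumer.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_gpi_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int demo_request_gpi_irq(struct device *dev, struct gpio_desc *gpio)
	{
		int irq = gpiod_to_irq(gpio);

		if (irq < 0)
			return irq;

		/* The chip is threaded, so request a threaded handler. */
		return devm_request_threaded_irq(dev, irq, NULL, demo_gpi_handler,
						 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
						 "demo-gpi", NULL);
	}
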
+
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ struct gpio_irq_chip *girq;
int i, error;
- if (!gpio_data)
- return 0;
-
- kpad->gc.ngpio = adp5588_build_gpiomap(kpad, pdata);
+ kpad->gc.ngpio = adp5588_build_gpiomap(kpad);
if (kpad->gc.ngpio == 0) {
dev_info(dev, "No unused gpios left to export\n");
return 0;
}
+ kpad->gc.parent = &kpad->client->dev;
kpad->gc.direction_input = adp5588_gpio_direction_input;
kpad->gc.direction_output = adp5588_gpio_direction_output;
kpad->gc.get = adp5588_gpio_get_value;
kpad->gc.set = adp5588_gpio_set_value;
+ kpad->gc.set_config = adp5588_gpio_set_config;
kpad->gc.can_sleep = 1;
- kpad->gc.base = gpio_data->gpio_start;
+ kpad->gc.base = -1;
kpad->gc.label = kpad->client->name;
kpad->gc.owner = THIS_MODULE;
- kpad->gc.names = gpio_data->names;
+
+ girq = &kpad->gc.irq;
+ gpio_irq_chip_set_chip(girq, &adp5588_irq_chip);
+ girq->handler = handle_bad_irq;
+ girq->threaded = true;
mutex_init(&kpad->gpio_lock);
@@ -235,54 +463,87 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->dat_out[i] = adp5588_read(kpad->client,
GPIO_DAT_OUT1 + i);
kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
+ kpad->pull_dis[i] = adp5588_read(kpad->client, GPIO_PULL1 + i);
}
- if (gpio_data->setup) {
- error = gpio_data->setup(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
- if (error)
- dev_warn(dev, "setup failed: %d\n", error);
- }
+ return 0;
+}
- if (gpio_data->teardown) {
- error = devm_add_action(dev, adp5588_gpio_do_teardown, kpad);
- if (error)
- dev_warn(dev, "failed to schedule teardown: %d\n",
- error);
- }
+static unsigned long adp5588_gpiomap_get_hwirq(struct device *dev,
+ const u8 *map, unsigned int gpio,
+ unsigned int ngpios)
+{
+ unsigned int hwirq;
- return 0;
+ for (hwirq = 0; hwirq < ngpios; hwirq++)
+ if (map[hwirq] == gpio)
+ return hwirq;
+
+ /* should never happen */
+ dev_warn_ratelimited(dev, "could not find the hwirq for gpio(%u)\n", gpio);
+
+ return ADP5588_INVALID_HWIRQ;
}
-#else
-static inline int adp5588_gpio_add(struct adp5588_kpad *kpad)
+static void adp5588_gpio_irq_handle(struct adp5588_kpad *kpad, int key_val,
+ int key_press)
{
- return 0;
+ unsigned int irq, gpio = key_val - GPI_PIN_BASE, irq_type;
+ struct i2c_client *client = kpad->client;
+ struct irq_data *irqd;
+ unsigned long hwirq;
+
+ hwirq = adp5588_gpiomap_get_hwirq(&client->dev, kpad->gpiomap,
+ gpio, kpad->gc.ngpio);
+ if (hwirq == ADP5588_INVALID_HWIRQ) {
+ dev_err(&client->dev, "Could not get hwirq for key(%u)\n", key_val);
+ return;
+ }
+
+ irq = irq_find_mapping(kpad->gc.irq.domain, hwirq);
+ if (!irq)
+ return;
+
+ irqd = irq_get_irq_data(irq);
+ if (!irqd) {
+ dev_err(&client->dev, "Could not get irq(%u) data\n", irq);
+ return;
+ }
+
+ irq_type = irqd_get_trigger_type(irqd);
+
+ /*
+ * Default is active low which means key_press is asserted on
+ * the falling edge.
+ */
+ if ((irq_type & IRQ_TYPE_EDGE_RISING && !key_press) ||
+ (irq_type & IRQ_TYPE_EDGE_FALLING && key_press))
+ handle_nested_irq(irq);
}
-#endif
static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
{
- int i, j;
+ int i;
for (i = 0; i < ev_cnt; i++) {
- int key = adp5588_read(kpad->client, Key_EVENTA + i);
+ int key = adp5588_read(kpad->client, KEY_EVENTA + i);
int key_val = key & KEY_EV_MASK;
+ int key_press = key & KEY_EV_PRESSED;
if (key_val >= GPI_PIN_BASE && key_val <= GPI_PIN_END) {
- for (j = 0; j < kpad->gpimapsize; j++) {
- if (key_val == kpad->gpimap[j].pin) {
- input_report_switch(kpad->input,
- kpad->gpimap[j].sw_evt,
- key & KEY_EV_PRESSED);
- break;
- }
- }
+ /* gpio line used as IRQ source */
+ adp5588_gpio_irq_handle(kpad, key_val, key_press);
} else {
+ int row = (key_val - 1) / ADP5588_COLS_MAX;
+ int col = (key_val - 1) % ADP5588_COLS_MAX;
+ int code = MATRIX_SCAN_CODE(row, col, kpad->row_shift);
+
+ dev_dbg_ratelimited(&kpad->client->dev,
+ "report key(%d) r(%d) c(%d) code(%d)\n",
+ key_val, row, col, kpad->keycode[code]);
+
input_report_key(kpad->input,
- kpad->keycode[key_val - 1],
- key & KEY_EV_PRESSED);
+ kpad->keycode[code], key_press);
}
}
}
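
Event numbers read from KEY_EVENTA are 1-based, and the row/column split above always uses the device's fixed 10-column geometry; MATRIX_SCAN_CODE() then packs (row, col) using row_shift = get_count_order(cols). A standalone demo assuming the full 10-column setup, where get_count_order(10) == 4 (not part of the patch):

	#include <stdio.h>

	#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))

	int main(void)
	{
		unsigned int cols_max = 10, row_shift = 4;
		unsigned int key_val = 12; /* 1-based event from the FIFO */
		unsigned int row = (key_val - 1) / cols_max;
		unsigned int col = (key_val - 1) % cols_max;

		/* Prints: key 12 -> row 1 col 1 scancode 17 */
		printf("key %u -> row %u col %u scancode %u\n",
		       key_val, row, col, MATRIX_SCAN_CODE(row, col, row_shift));
		return 0;
	}
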
@@ -335,176 +596,145 @@ static irqreturn_t adp5588_thread_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int adp5588_setup(struct i2c_client *client)
+static int adp5588_setup(struct adp5588_kpad *kpad)
{
- const struct adp5588_kpad_platform_data *pdata =
- dev_get_platdata(&client->dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ struct i2c_client *client = kpad->client;
int i, ret;
- unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
-
- ret = adp5588_write(client, KP_GPIO1, KP_SEL(pdata->rows));
- ret |= adp5588_write(client, KP_GPIO2, KP_SEL(pdata->cols) & 0xFF);
- ret |= adp5588_write(client, KP_GPIO3, KP_SEL(pdata->cols) >> 8);
- if (pdata->en_keylock) {
- ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
- ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
- ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
- }
+ ret = adp5588_write(client, KP_GPIO1, KP_SEL(kpad->rows));
+ if (ret)
+ return ret;
- for (i = 0; i < KEYP_MAX_EVENT; i++)
- ret |= adp5588_read(client, Key_EVENTA);
+ ret = adp5588_write(client, KP_GPIO2, KP_SEL(kpad->cols) & 0xFF);
+ if (ret)
+ return ret;
- for (i = 0; i < pdata->gpimapsize; i++) {
- unsigned short pin = pdata->gpimap[i].pin;
+ ret = adp5588_write(client, KP_GPIO3, KP_SEL(kpad->cols) >> 8);
+ if (ret)
+ return ret;
- if (pin <= GPI_PIN_ROW_END) {
- evt_mode1 |= (1 << (pin - GPI_PIN_ROW_BASE));
- } else {
- evt_mode2 |= ((1 << (pin - GPI_PIN_COL_BASE)) & 0xFF);
- evt_mode3 |= ((1 << (pin - GPI_PIN_COL_BASE)) >> 8);
- }
+ for (i = 0; i < kpad->nkeys_unlock; i++) {
+ ret = adp5588_write(client, UNLOCK1 + i, kpad->unlock_keys[i]);
+ if (ret)
+ return ret;
}
- if (pdata->gpimapsize) {
- ret |= adp5588_write(client, GPI_EM1, evt_mode1);
- ret |= adp5588_write(client, GPI_EM2, evt_mode2);
- ret |= adp5588_write(client, GPI_EM3, evt_mode3);
+ if (kpad->nkeys_unlock) {
+ ret = adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
+ if (ret)
+ return ret;
}
- if (gpio_data) {
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
- int pull_mask = gpio_data->pullup_dis_mask;
-
- ret |= adp5588_write(client, GPIO_PULL1 + i,
- (pull_mask >> (8 * i)) & 0xFF);
- }
+ for (i = 0; i < KEYP_MAX_EVENT; i++) {
+ ret = adp5588_read(client, KEY_EVENTA);
+ if (ret < 0)
+ return ret;
}
- ret |= adp5588_write(client, INT_STAT,
- ADP5588_CMP2_INT | ADP5588_CMP1_INT |
- ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
- ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
+ ret = adp5588_write(client, INT_STAT,
+ ADP5588_CMP2_INT | ADP5588_CMP1_INT |
+ ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
+ ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
+ if (ret)
+ return ret;
- ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
- ADP5588_OVR_FLOW_IEN |
- ADP5588_KE_IEN);
+ return adp5588_write(client, CFG, ADP5588_INT_CFG |
+ ADP5588_OVR_FLOW_IEN | ADP5588_KE_IEN);
+}
+
+static int adp5588_fw_parse(struct adp5588_kpad *kpad)
+{
+ struct i2c_client *client = kpad->client;
+ int ret, i;
- if (ret < 0) {
- dev_err(&client->dev, "Write Error\n");
+ ret = matrix_keypad_parse_properties(&client->dev, &kpad->rows,
+ &kpad->cols);
+ if (ret)
return ret;
+
+ if (kpad->rows > ADP5588_ROWS_MAX || kpad->cols > ADP5588_COLS_MAX) {
+ dev_err(&client->dev, "Invalid nr of rows(%u) or cols(%u)\n",
+ kpad->rows, kpad->cols);
+ return -EINVAL;
}
- return 0;
-}
+ ret = matrix_keypad_build_keymap(NULL, NULL, kpad->rows, kpad->cols,
+ kpad->keycode, kpad->input);
+ if (ret)
+ return ret;
-static void adp5588_report_switch_state(struct adp5588_kpad *kpad)
-{
- int gpi_stat1 = adp5588_read(kpad->client, GPIO_DAT_STAT1);
- int gpi_stat2 = adp5588_read(kpad->client, GPIO_DAT_STAT2);
- int gpi_stat3 = adp5588_read(kpad->client, GPIO_DAT_STAT3);
- int gpi_stat_tmp, pin_loc;
- int i;
+ kpad->row_shift = get_count_order(kpad->cols);
- for (i = 0; i < kpad->gpimapsize; i++) {
- unsigned short pin = kpad->gpimap[i].pin;
+ if (device_property_read_bool(&client->dev, "autorepeat"))
+ __set_bit(EV_REP, kpad->input->evbit);
- if (pin <= GPI_PIN_ROW_END) {
- gpi_stat_tmp = gpi_stat1;
- pin_loc = pin - GPI_PIN_ROW_BASE;
- } else if ((pin - GPI_PIN_COL_BASE) < 8) {
- gpi_stat_tmp = gpi_stat2;
- pin_loc = pin - GPI_PIN_COL_BASE;
- } else {
- gpi_stat_tmp = gpi_stat3;
- pin_loc = pin - GPI_PIN_COL_BASE - 8;
- }
+ kpad->nkeys_unlock = device_property_count_u32(&client->dev,
+ "adi,unlock-keys");
+ if (kpad->nkeys_unlock <= 0) {
+ /* so that we don't end up enabling key lock */
+ kpad->nkeys_unlock = 0;
+ return 0;
+ }
- if (gpi_stat_tmp < 0) {
- dev_err(&kpad->client->dev,
- "Can't read GPIO_DAT_STAT switch %d default to OFF\n",
- pin);
- gpi_stat_tmp = 0;
+ if (kpad->nkeys_unlock > ARRAY_SIZE(kpad->unlock_keys)) {
+ dev_err(&client->dev, "number of unlock keys(%d) > (%zu)\n",
+ kpad->nkeys_unlock, ARRAY_SIZE(kpad->unlock_keys));
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32_array(&client->dev, "adi,unlock-keys",
+ kpad->unlock_keys,
+ kpad->nkeys_unlock);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < kpad->nkeys_unlock; i++) {
+ /*
+ * Even though it should be possible (as stated in the datasheet)
+ * to use GPIs (which are reported through the key event FIFO) as
+ * unlock keys, it was not working at all and led to overflow
+ * events at some point. Hence, for now, only allow keys that are
+ * part of the keypad matrix to be used and, if a reliable way of
+ * using GPIs is found, this condition can be removed/lightened.
+ */
+ if (kpad->unlock_keys[i] >= kpad->cols * kpad->rows) {
+ dev_err(&client->dev, "Invalid unlock key(%d)\n",
+ kpad->unlock_keys[i]);
+ return -EINVAL;
}
- input_report_switch(kpad->input,
- kpad->gpimap[i].sw_evt,
- !(gpi_stat_tmp & (1 << pin_loc)));
+ /*
+ * Firmware properties keys start from 0 but on the device they
+ * start from 1.
+ */
+ kpad->unlock_keys[i] += 1;
}
- input_sync(kpad->input);
+ return 0;
}
+static void adp5588_disable_regulator(void *reg)
+{
+ regulator_disable(reg);
+}
static int adp5588_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adp5588_kpad *kpad;
- const struct adp5588_kpad_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct input_dev *input;
+ struct gpio_desc *gpio;
+ struct regulator *vcc;
unsigned int revid;
- int ret, i;
+ int ret;
int error;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
return -EIO;
}
- if (!pdata) {
- dev_err(&client->dev, "no platform data?\n");
- return -EINVAL;
- }
-
- if (!pdata->rows || !pdata->cols || !pdata->keymap) {
- dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
- return -EINVAL;
- }
-
- if (pdata->keymapsize != ADP5588_KEYMAPSIZE) {
- dev_err(&client->dev, "invalid keymapsize\n");
- return -EINVAL;
- }
-
- if (!pdata->gpimap && pdata->gpimapsize) {
- dev_err(&client->dev, "invalid gpimap from pdata\n");
- return -EINVAL;
- }
-
- if (pdata->gpimapsize > ADP5588_GPIMAPSIZE_MAX) {
- dev_err(&client->dev, "invalid gpimapsize\n");
- return -EINVAL;
- }
-
- for (i = 0; i < pdata->gpimapsize; i++) {
- unsigned short pin = pdata->gpimap[i].pin;
-
- if (pin < GPI_PIN_BASE || pin > GPI_PIN_END) {
- dev_err(&client->dev, "invalid gpi pin data\n");
- return -EINVAL;
- }
-
- if (pin <= GPI_PIN_ROW_END) {
- if (pin - GPI_PIN_ROW_BASE + 1 <= pdata->rows) {
- dev_err(&client->dev, "invalid gpi row data\n");
- return -EINVAL;
- }
- } else {
- if (pin - GPI_PIN_COL_BASE + 1 <= pdata->cols) {
- dev_err(&client->dev, "invalid gpi col data\n");
- return -EINVAL;
- }
- }
- }
-
- if (!client->irq) {
- dev_err(&client->dev, "no IRQ?\n");
- return -EINVAL;
- }
-
kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL);
if (!kpad)
return -ENOMEM;
@@ -516,11 +746,38 @@ static int adp5588_probe(struct i2c_client *client,
kpad->client = client;
kpad->input = input;
+ error = adp5588_fw_parse(kpad);
+ if (error)
+ return error;
+
+ vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(vcc))
+ return PTR_ERR(vcc);
+
+ error = regulator_enable(vcc);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(&client->dev,
+ adp5588_disable_regulator, vcc);
+ if (error)
+ return error;
+
+ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ fsleep(30);
+ gpiod_set_value_cansleep(gpio, 0);
+ fsleep(60);
+ }
+
ret = adp5588_read(client, DEV_ID);
if (ret < 0)
return ret;
- revid = (u8) ret & ADP5588_DEVICE_ID_MASK;
+ revid = ret & ADP5588_DEVICE_ID_MASK;
if (WA_DELAYED_READOUT_REVID(revid))
kpad->delay = msecs_to_jiffies(WA_DELAYED_READOUT_TIME);
@@ -534,32 +791,6 @@ static int adp5588_probe(struct i2c_client *client,
input->id.product = 0x0001;
input->id.version = revid;
- input->keycodesize = sizeof(kpad->keycode[0]);
- input->keycodemax = pdata->keymapsize;
- input->keycode = kpad->keycode;
-
- memcpy(kpad->keycode, pdata->keymap,
- pdata->keymapsize * input->keycodesize);
-
- kpad->gpimap = pdata->gpimap;
- kpad->gpimapsize = pdata->gpimapsize;
-
- /* setup input device */
- __set_bit(EV_KEY, input->evbit);
-
- if (pdata->repeat)
- __set_bit(EV_REP, input->evbit);
-
- for (i = 0; i < input->keycodemax; i++)
- if (kpad->keycode[i] <= KEY_MAX)
- __set_bit(kpad->keycode[i], input->keybit);
- __clear_bit(KEY_RESERVED, input->keybit);
-
- if (kpad->gpimapsize)
- __set_bit(EV_SW, input->evbit);
- for (i = 0; i < kpad->gpimapsize; i++)
- __set_bit(kpad->gpimap[i].sw_evt, input->swbit);
-
error = input_register_device(input);
if (error) {
dev_err(&client->dev, "unable to register input device: %d\n",
@@ -567,6 +798,14 @@ static int adp5588_probe(struct i2c_client *client,
return error;
}
+ error = adp5588_setup(kpad);
+ if (error)
+ return error;
+
+ error = adp5588_gpio_add(kpad);
+ if (error)
+ return error;
+
error = devm_request_threaded_irq(&client->dev, client->irq,
adp5588_hard_irq, adp5588_thread_irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -577,17 +816,6 @@ static int adp5588_probe(struct i2c_client *client,
return error;
}
- error = adp5588_setup(client);
- if (error)
- return error;
-
- if (kpad->gpimapsize)
- adp5588_report_switch_state(kpad);
-
- error = adp5588_gpio_add(kpad);
- if (error)
- return error;
-
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
}
@@ -599,7 +827,7 @@ static void adp5588_remove(struct i2c_client *client)
/* all resources will be freed by devm */
}
-static int __maybe_unused adp5588_suspend(struct device *dev)
+static int adp5588_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -608,7 +836,7 @@ static int __maybe_unused adp5588_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused adp5588_resume(struct device *dev)
+static int adp5588_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -617,7 +845,7 @@ static int __maybe_unused adp5588_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
static const struct i2c_device_id adp5588_id[] = {
{ "adp5588-keys", 0 },
@@ -626,10 +854,18 @@ static const struct i2c_device_id adp5588_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adp5588_id);
+static const struct of_device_id adp5588_of_match[] = {
+ { .compatible = "adi,adp5588" },
+ { .compatible = "adi,adp5587" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adp5588_of_match);
+
static struct i2c_driver adp5588_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .pm = &adp5588_dev_pm_ops,
+ .of_match_table = adp5588_of_match,
+ .pm = pm_sleep_ptr(&adp5588_dev_pm_ops),
},
.probe = adp5588_probe,
.remove = adp5588_remove,
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 09551f64d53f..a20a4e186639 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -10,9 +10,6 @@
* Amiga keyboard driver for Linux/m68k
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index cbc6c0d4670a..91a9810f6980 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -202,7 +202,7 @@ struct command_protocol_tp_info {
};
/**
- * struct touchpad_info - touchpad info response.
+ * struct touchpad_info_protocol - touchpad info response.
* message.type = 0x1020, message.length = 0x006e
*
* @unknown1: unknown
@@ -311,7 +311,7 @@ struct message {
struct command_protocol_mt_init init_mt_command;
struct command_protocol_capsl capsl_command;
struct command_protocol_bl bl_command;
- u8 data[0];
+ DECLARE_FLEX_ARRAY(u8, data);
};
};
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 77ed54630601..07e17e563f9b 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -21,9 +21,6 @@
* This driver only deals with handing key events off to the input layer.
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d4131236d18c..246958795f60 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -323,11 +323,13 @@ static umode_t atkbd_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group atkbd_attribute_group = {
+static const struct attribute_group atkbd_attribute_group = {
.attrs = atkbd_attributes,
.is_visible = atkbd_attr_is_visible,
};
+__ATTRIBUTE_GROUPS(atkbd_attribute);
+
static const unsigned int xl_table[] = {
ATKBD_RET_BAT, ATKBD_RET_ERR, ATKBD_RET_ACK,
ATKBD_RET_NAK, ATKBD_RET_HANJA, ATKBD_RET_HANGEUL,
@@ -922,8 +924,6 @@ static void atkbd_disconnect(struct serio *serio)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
- sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
-
atkbd_disable(atkbd);
input_unregister_device(atkbd->dev);
@@ -1271,21 +1271,16 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd_set_keycode_table(atkbd);
atkbd_set_device_attrs(atkbd);
- err = sysfs_create_group(&serio->dev.kobj, &atkbd_attribute_group);
- if (err)
- goto fail3;
-
atkbd_enable(atkbd);
if (serio->write)
atkbd_activate(atkbd);
err = input_register_device(atkbd->dev);
if (err)
- goto fail4;
+ goto fail3;
return 0;
- fail4: sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
fail3: serio_close(serio);
fail2: serio_set_drvdata(serio, NULL);
fail1: input_free_device(dev);
@@ -1378,7 +1373,8 @@ MODULE_DEVICE_TABLE(serio, atkbd_serio_ids);
static struct serio_driver atkbd_drv = {
.driver = {
- .name = "atkbd",
+ .name = "atkbd",
+ .dev_groups = atkbd_attribute_groups,
},
.description = DRIVER_DESC,
.id_table = atkbd_serio_ids,
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index 939c88655fc0..4c1a3e611edd 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -6,9 +6,11 @@
*/
#include <linux/input.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/input/matrix_keypad.h>
@@ -86,7 +88,6 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
{
struct clps711x_keypad_data *priv;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct input_dev *input;
u32 poll_interval;
int i, err;
@@ -95,11 +96,11 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
+ priv->syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(priv->syscon))
return PTR_ERR(priv->syscon);
- priv->row_count = of_gpio_named_count(np, "row-gpios");
+ priv->row_count = gpiod_count(dev, "row");
if (priv->row_count < 1)
return -EINVAL;
@@ -119,7 +120,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
return PTR_ERR(data->desc);
}
- err = of_property_read_u32(np, "poll-interval", &poll_interval);
+ err = device_property_read_u32(dev, "poll-interval", &poll_interval);
if (err)
return err;
@@ -143,7 +144,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
return err;
input_set_capability(input, EV_MSC, MSC_SCAN);
- if (of_property_read_bool(np, "autorepeat"))
+ if (device_property_read_bool(dev, "autorepeat"))
__set_bit(EV_REP, input->evbit);
/* Set all columns to low */
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 7a3b0664ab4f..f5bf7524722a 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/soc/cirrus/ep93xx.h>
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index ae9303848571..e15a93619e82 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index e4a1839ca934..047b654b3752 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -46,9 +46,6 @@
* http://www.vt100.net/manx/details?pn=EK-104AA-TM-001;id=21;cp=1
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -359,18 +356,18 @@ static void lkkbd_detection_done(struct lkkbd *lk)
*/
switch (lk->id[4]) {
case 1:
- strlcpy(lk->name, "DEC LK201 keyboard", sizeof(lk->name));
+ strscpy(lk->name, "DEC LK201 keyboard", sizeof(lk->name));
if (lk201_compose_is_alt)
lk->keycode[0xb1] = KEY_LEFTALT;
break;
case 2:
- strlcpy(lk->name, "DEC LK401 keyboard", sizeof(lk->name));
+ strscpy(lk->name, "DEC LK401 keyboard", sizeof(lk->name));
break;
default:
- strlcpy(lk->name, "Unknown DEC keyboard", sizeof(lk->name));
+ strscpy(lk->name, "Unknown DEC keyboard", sizeof(lk->name));
printk(KERN_ERR
"lkkbd: keyboard on %s is unknown, please report to "
"Jan-Benedict Glaw <jbglaw@lug-owl.de>\n", lk->phys);
@@ -626,7 +623,7 @@ static int lkkbd_connect(struct serio *serio, struct serio_driver *drv)
lk->ctrlclick_volume = ctrlclick_volume;
memcpy(lk->keycode, lkkbd_keycode, sizeof(lk->keycode));
- strlcpy(lk->name, "DEC LK keyboard", sizeof(lk->name));
+ strscpy(lk->name, "DEC LK keyboard", sizeof(lk->name));
snprintf(lk->phys, sizeof(lk->phys), "%s/input0", serio->phys);
input_dev->name = lk->name;
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 9dac22c14125..3052cd6dedac 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -4,13 +4,13 @@
* Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
#include <linux/i2c.h>
-#include <linux/interrupt.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/input/lm8333.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#define LM8333_FIFO_READ 0x20
#define LM8333_DEBOUNCE 0x22
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 30924b57058f..7dd3f3eda834 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/irq.h>
@@ -416,9 +417,9 @@ matrix_keypad_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- pdata->num_row_gpios = nrow = of_gpio_named_count(np, "row-gpios");
- pdata->num_col_gpios = ncol = of_gpio_named_count(np, "col-gpios");
- if (nrow <= 0 || ncol <= 0) {
+ pdata->num_row_gpios = nrow = gpiod_count(dev, "row");
+ pdata->num_col_gpios = ncol = gpiod_count(dev, "col");
+ if (nrow < 0 || ncol < 0) {
dev_err(dev, "number of keypad rows/columns not specified\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index bf447bf598fb..19f69d167fbd 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -18,6 +19,7 @@
#define MTK_KPD_DEBOUNCE_MASK GENMASK(13, 0)
#define MTK_KPD_DEBOUNCE_MAX_MS 256
#define MTK_KPD_SEL 0x0020
+#define MTK_KPD_SEL_DOUBLE_KP_MODE BIT(0)
#define MTK_KPD_SEL_COL GENMASK(15, 10)
#define MTK_KPD_SEL_ROW GENMASK(9, 4)
#define MTK_KPD_SEL_COLMASK(c) GENMASK((c) + 9, 10)
@@ -31,6 +33,8 @@ struct mt6779_keypad {
struct clk *clk;
u32 n_rows;
u32 n_cols;
+ void (*calc_row_col)(unsigned int key,
+ unsigned int *row, unsigned int *col);
DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS);
};
@@ -67,8 +71,7 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
continue;
key = bit_nr / 32 * 16 + bit_nr % 32;
- row = key / 9;
- col = key % 9;
+ keypad->calc_row_col(key, &row, &col);
scancode = MATRIX_SCAN_CODE(row, col, row_shift);
/* 1: not pressed, 0: pressed */
@@ -94,12 +97,29 @@ static void mt6779_keypad_clk_disable(void *data)
clk_disable_unprepare(data);
}
+static void mt6779_keypad_calc_row_col_single(unsigned int key,
+ unsigned int *row,
+ unsigned int *col)
+{
+ *row = key / 9;
+ *col = key % 9;
+}
+
+static void mt6779_keypad_calc_row_col_double(unsigned int key,
+ unsigned int *row,
+ unsigned int *col)
+{
+ *row = key / 13;
+ *col = (key % 13) / 2;
+}
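
The arithmetic above implies that in double-keys-per-group mode each row spans 13 status-bit positions, with two adjacent bits folding onto one column, hence the key / 13 and (key % 13) / 2 split. A standalone demo of the mapping (not part of the patch):

	#include <stdio.h>

	/* Mirrors mt6779_keypad_calc_row_col_double() for a few indices. */
	int main(void)
	{
		unsigned int key;

		for (key = 0; key < 26; key += 5)
			printf("key %2u -> row %u col %u\n",
			       key, key / 13, (key % 13) / 2);
		return 0;
	}
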
+
static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
{
struct mt6779_keypad *keypad;
void __iomem *base;
int irq;
u32 debounce;
+ u32 keys_per_group;
bool wakeup;
int error;
@@ -148,6 +168,23 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (device_property_read_u32(&pdev->dev, "mediatek,keys-per-group",
+ &keys_per_group))
+ keys_per_group = 1;
+
+ switch (keys_per_group) {
+ case 1:
+ keypad->calc_row_col = mt6779_keypad_calc_row_col_single;
+ break;
+ case 2:
+ keypad->calc_row_col = mt6779_keypad_calc_row_col_double;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Invalid keys-per-group: %d\n", keys_per_group);
+ return -EINVAL;
+ }
+
wakeup = device_property_read_bool(&pdev->dev, "wakeup-source");
dev_dbg(&pdev->dev, "n_row=%d n_col=%d debounce=%d\n",
@@ -166,6 +203,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
(debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+ if (keys_per_group == 2)
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL,
+ MTK_KPD_SEL_DOUBLE_KP_MODE,
+ MTK_KPD_SEL_DOUBLE_KP_MODE);
+
regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_ROW,
MTK_KPD_SEL_ROWMASK(keypad->n_rows));
regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 6404081253ea..9b34da0ec260 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
@@ -22,6 +23,10 @@
#define MTK_PMIC_PWRKEY_RST BIT(6)
#define MTK_PMIC_HOMEKEY_RST BIT(5)
+#define MTK_PMIC_MT6331_RST_DU_MASK GENMASK(13, 12)
+#define MTK_PMIC_MT6331_PWRKEY_RST BIT(9)
+#define MTK_PMIC_MT6331_HOMEKEY_RST BIT(8)
+
#define MTK_PMIC_PWRKEY_INDEX 0
#define MTK_PMIC_HOMEKEY_INDEX 1
#define MTK_PMIC_MAX_KEY_COUNT 2
@@ -72,6 +77,19 @@ static const struct mtk_pmic_regs mt6323_regs = {
.rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
+static const struct mtk_pmic_regs mt6331_regs = {
+ .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6331_TOPSTATUS, 0x2,
+ MT6331_INT_MISC_CON, 0x4,
+ MTK_PMIC_MT6331_PWRKEY_RST),
+ .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6331_TOPSTATUS, 0x4,
+ MT6331_INT_MISC_CON, 0x2,
+ MTK_PMIC_MT6331_HOMEKEY_RST),
+ .pmic_rst_reg = MT6331_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_MT6331_RST_DU_MASK,
+};
+
static const struct mtk_pmic_regs mt6358_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
@@ -256,6 +274,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
.compatible = "mediatek,mt6323-keys",
.data = &mt6323_regs,
}, {
+ .compatible = "mediatek,mt6331-keys",
+ .data = &mt6331_regs,
+ }, {
.compatible = "mediatek,mt6358-keys",
.data = &mt6358_regs,
}, {
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index 9742261b2d1a..df00a119aa9a 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -7,9 +7,6 @@
* Newton keyboard driver for Linux
*/
-/*
- */
-
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/pinephone-keyboard.c b/drivers/input/keyboard/pinephone-keyboard.c
new file mode 100644
index 000000000000..5548699b8b38
--- /dev/null
+++ b/drivers/input/keyboard/pinephone-keyboard.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2021-2022 Samuel Holland <samuel@sholland.org>
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#define DRV_NAME "pinephone-keyboard"
+
+#define PPKB_CRC8_POLYNOMIAL 0x07
+
+#define PPKB_DEVICE_ID_HI 0x00
+#define PPKB_DEVICE_ID_HI_VALUE 'K'
+#define PPKB_DEVICE_ID_LO 0x01
+#define PPKB_DEVICE_ID_LO_VALUE 'B'
+#define PPKB_FW_REVISION 0x02
+#define PPKB_FW_FEATURES 0x03
+#define PPKB_MATRIX_SIZE 0x06
+#define PPKB_SCAN_CRC 0x07
+#define PPKB_SCAN_DATA 0x08
+#define PPKB_SYS_CONFIG 0x20
+#define PPKB_SYS_CONFIG_DISABLE_SCAN BIT(0)
+#define PPKB_SYS_SMBUS_COMMAND 0x21
+#define PPKB_SYS_SMBUS_DATA 0x22
+#define PPKB_SYS_COMMAND 0x23
+#define PPKB_SYS_COMMAND_SMBUS_READ 0x91
+#define PPKB_SYS_COMMAND_SMBUS_WRITE 0xa1
+
+#define PPKB_ROWS 6
+#define PPKB_COLS 12
+
+/* Size of the scan buffer, including the CRC byte at the beginning. */
+#define PPKB_BUF_LEN (1 + PPKB_COLS)
+
+static const uint32_t ppkb_keymap[] = {
+ KEY(0, 0, KEY_ESC),
+ KEY(0, 1, KEY_1),
+ KEY(0, 2, KEY_2),
+ KEY(0, 3, KEY_3),
+ KEY(0, 4, KEY_4),
+ KEY(0, 5, KEY_5),
+ KEY(0, 6, KEY_6),
+ KEY(0, 7, KEY_7),
+ KEY(0, 8, KEY_8),
+ KEY(0, 9, KEY_9),
+ KEY(0, 10, KEY_0),
+ KEY(0, 11, KEY_BACKSPACE),
+
+ KEY(1, 0, KEY_TAB),
+ KEY(1, 1, KEY_Q),
+ KEY(1, 2, KEY_W),
+ KEY(1, 3, KEY_E),
+ KEY(1, 4, KEY_R),
+ KEY(1, 5, KEY_T),
+ KEY(1, 6, KEY_Y),
+ KEY(1, 7, KEY_U),
+ KEY(1, 8, KEY_I),
+ KEY(1, 9, KEY_O),
+ KEY(1, 10, KEY_P),
+ KEY(1, 11, KEY_ENTER),
+
+ KEY(2, 0, KEY_LEFTMETA),
+ KEY(2, 1, KEY_A),
+ KEY(2, 2, KEY_S),
+ KEY(2, 3, KEY_D),
+ KEY(2, 4, KEY_F),
+ KEY(2, 5, KEY_G),
+ KEY(2, 6, KEY_H),
+ KEY(2, 7, KEY_J),
+ KEY(2, 8, KEY_K),
+ KEY(2, 9, KEY_L),
+ KEY(2, 10, KEY_SEMICOLON),
+
+ KEY(3, 0, KEY_LEFTSHIFT),
+ KEY(3, 1, KEY_Z),
+ KEY(3, 2, KEY_X),
+ KEY(3, 3, KEY_C),
+ KEY(3, 4, KEY_V),
+ KEY(3, 5, KEY_B),
+ KEY(3, 6, KEY_N),
+ KEY(3, 7, KEY_M),
+ KEY(3, 8, KEY_COMMA),
+ KEY(3, 9, KEY_DOT),
+ KEY(3, 10, KEY_SLASH),
+
+ KEY(4, 1, KEY_LEFTCTRL),
+ KEY(4, 4, KEY_SPACE),
+ KEY(4, 6, KEY_APOSTROPHE),
+ KEY(4, 8, KEY_RIGHTBRACE),
+ KEY(4, 9, KEY_LEFTBRACE),
+
+ KEY(5, 2, KEY_FN),
+ KEY(5, 3, KEY_LEFTALT),
+ KEY(5, 5, KEY_RIGHTALT),
+
+ /* FN layer */
+ KEY(PPKB_ROWS + 0, 0, KEY_FN_ESC),
+ KEY(PPKB_ROWS + 0, 1, KEY_F1),
+ KEY(PPKB_ROWS + 0, 2, KEY_F2),
+ KEY(PPKB_ROWS + 0, 3, KEY_F3),
+ KEY(PPKB_ROWS + 0, 4, KEY_F4),
+ KEY(PPKB_ROWS + 0, 5, KEY_F5),
+ KEY(PPKB_ROWS + 0, 6, KEY_F6),
+ KEY(PPKB_ROWS + 0, 7, KEY_F7),
+ KEY(PPKB_ROWS + 0, 8, KEY_F8),
+ KEY(PPKB_ROWS + 0, 9, KEY_F9),
+ KEY(PPKB_ROWS + 0, 10, KEY_F10),
+ KEY(PPKB_ROWS + 0, 11, KEY_DELETE),
+
+ KEY(PPKB_ROWS + 1, 10, KEY_PAGEUP),
+
+ KEY(PPKB_ROWS + 2, 0, KEY_SYSRQ),
+ KEY(PPKB_ROWS + 2, 9, KEY_PAGEDOWN),
+ KEY(PPKB_ROWS + 2, 10, KEY_INSERT),
+
+ KEY(PPKB_ROWS + 3, 0, KEY_LEFTSHIFT),
+ KEY(PPKB_ROWS + 3, 8, KEY_HOME),
+ KEY(PPKB_ROWS + 3, 9, KEY_UP),
+ KEY(PPKB_ROWS + 3, 10, KEY_END),
+
+ KEY(PPKB_ROWS + 4, 1, KEY_LEFTCTRL),
+ KEY(PPKB_ROWS + 4, 6, KEY_LEFT),
+ KEY(PPKB_ROWS + 4, 8, KEY_RIGHT),
+ KEY(PPKB_ROWS + 4, 9, KEY_DOWN),
+
+ KEY(PPKB_ROWS + 5, 3, KEY_LEFTALT),
+ KEY(PPKB_ROWS + 5, 5, KEY_RIGHTALT),
+};
+
+static const struct matrix_keymap_data ppkb_keymap_data = {
+ .keymap = ppkb_keymap,
+ .keymap_size = ARRAY_SIZE(ppkb_keymap),
+};
+
+struct pinephone_keyboard {
+ struct i2c_adapter adapter;
+ struct input_dev *input;
+ u8 buf[2][PPKB_BUF_LEN];
+ u8 crc_table[CRC8_TABLE_SIZE];
+ u8 fn_state[PPKB_COLS];
+ bool buf_swap;
+ bool fn_pressed;
+};
+
+static int ppkb_adap_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size,
+ union i2c_smbus_data *data)
+{
+ struct i2c_client *client = adap->algo_data;
+ u8 buf[3];
+ int ret;
+
+ buf[0] = command;
+ buf[1] = data->byte;
+ buf[2] = read_write == I2C_SMBUS_READ ? PPKB_SYS_COMMAND_SMBUS_READ
+ : PPKB_SYS_COMMAND_SMBUS_WRITE;
+
+ ret = i2c_smbus_write_i2c_block_data(client, PPKB_SYS_SMBUS_COMMAND,
+ sizeof(buf), buf);
+ if (ret)
+ return ret;
+
+ /* Read back the command status until it passes or fails. */
+ do {
+ usleep_range(300, 500);
+ ret = i2c_smbus_read_byte_data(client, PPKB_SYS_COMMAND);
+ } while (ret == buf[2]);
+ if (ret < 0)
+ return ret;
+ /* Commands return 0x00 on success and 0xff on failure. */
+ if (ret)
+ return -EIO;
+
+ if (read_write == I2C_SMBUS_READ) {
+ ret = i2c_smbus_read_byte_data(client, PPKB_SYS_SMBUS_DATA);
+ if (ret < 0)
+ return ret;
+
+ data->byte = ret;
+ }
+
+ return 0;
+}
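
The tunnelling sequence implemented above: write { register, data, opcode } as one block to PPKB_SYS_SMBUS_COMMAND, poll PPKB_SYS_COMMAND until it stops reading back the opcode (0x00 means success, 0xff failure), then fetch the byte from PPKB_SYS_SMBUS_DATA on reads. From a consumer's point of view the proxy is transparent; a hedged sketch, where "client" is a hypothetical device bound to the downstream adapter registered in probe below and 0x10 is a made-up register:

	/* Normal SMBus call; it is routed through ppkb_adap_smbus_xfer(). */
	int val = i2c_smbus_read_byte_data(client, 0x10);

	if (val < 0)
		dev_err(&client->dev, "proxied read failed: %d\n", val);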
+
+static u32 ppkb_adap_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static const struct i2c_algorithm ppkb_adap_algo = {
+ .smbus_xfer = ppkb_adap_smbus_xfer,
+ .functionality = ppkb_adap_functionality,
+};
+
+static void ppkb_update(struct i2c_client *client)
+{
+ struct pinephone_keyboard *ppkb = i2c_get_clientdata(client);
+ unsigned short *keymap = ppkb->input->keycode;
+ int row_shift = get_count_order(PPKB_COLS);
+ u8 *old_buf = ppkb->buf[!ppkb->buf_swap];
+ u8 *new_buf = ppkb->buf[ppkb->buf_swap];
+ int col, crc, ret, row;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_read_i2c_block_data(client, PPKB_SCAN_CRC,
+ PPKB_BUF_LEN, new_buf);
+ if (ret != PPKB_BUF_LEN) {
+ dev_err(dev, "Failed to read scan data: %d\n", ret);
+ return;
+ }
+
+ crc = crc8(ppkb->crc_table, &new_buf[1], PPKB_COLS, CRC8_INIT_VALUE);
+ if (crc != new_buf[0]) {
+ dev_err(dev, "Bad scan data (%02x != %02x)\n", crc, new_buf[0]);
+ return;
+ }
+
+ ppkb->buf_swap = !ppkb->buf_swap;
+
+ for (col = 0; col < PPKB_COLS; ++col) {
+ u8 old = old_buf[1 + col];
+ u8 new = new_buf[1 + col];
+ u8 changed = old ^ new;
+
+ if (!changed)
+ continue;
+
+ for (row = 0; row < PPKB_ROWS; ++row) {
+ u8 mask = BIT(row);
+ u8 value = new & mask;
+ unsigned short code;
+ bool fn_state;
+
+ if (!(changed & mask))
+ continue;
+
+ /*
+ * Save off the FN key state when the key was pressed,
+ * and use that to determine the code during a release.
+ */
+ fn_state = value ? ppkb->fn_pressed : ppkb->fn_state[col] & mask;
+ if (fn_state)
+ ppkb->fn_state[col] ^= mask;
+
+ /* The FN layer is a second set of rows. */
+ code = MATRIX_SCAN_CODE(fn_state ? PPKB_ROWS + row : row,
+ col, row_shift);
+ input_event(ppkb->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(ppkb->input, keymap[code], value);
+ if (keymap[code] == KEY_FN)
+ ppkb->fn_pressed = value;
+ }
+ }
+ input_sync(ppkb->input);
+}
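
For reference, the scan-code math used by ppkb_update(): the keymap is built below with 2 * PPKB_ROWS rows, get_count_order(PPKB_COLS) is 4 for 12 columns, and MATRIX_SCAN_CODE() packs a key as (row << 4) + col, so the FN layer occupies rows 6-11. A standalone userspace sketch of that arithmetic (the constants mirror this file):

#include <stdio.h>

#define PPKB_ROWS	6
#define ROW_SHIFT	4	/* get_count_order(12) == 4 */
#define SCAN_CODE(row, col)	(((row) << ROW_SHIFT) + (col))

int main(void)
{
	/* Base layer: Q sits at row 1, column 1. */
	printf("Q    -> scan code %d\n", SCAN_CODE(1, 1));
	/* The FN layer is addressed as rows PPKB_ROWS..2 * PPKB_ROWS - 1. */
	printf("FN+Q -> scan code %d\n", SCAN_CODE(PPKB_ROWS + 1, 1));
	return 0;
}

This is also why fn_state is latched per column bit on press: the release must report the same layer's code even if FN was let go first.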
+
+static irqreturn_t ppkb_irq_thread(int irq, void *data)
+{
+ struct i2c_client *client = data;
+
+ ppkb_update(client);
+
+ return IRQ_HANDLED;
+}
+
+static int ppkb_set_scan(struct i2c_client *client, bool enable)
+{
+ struct device *dev = &client->dev;
+ int ret, val;
+
+ ret = i2c_smbus_read_byte_data(client, PPKB_SYS_CONFIG);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read config: %d\n", ret);
+ return ret;
+ }
+
+ if (enable)
+ val = ret & ~PPKB_SYS_CONFIG_DISABLE_SCAN;
+ else
+ val = ret | PPKB_SYS_CONFIG_DISABLE_SCAN;
+
+ ret = i2c_smbus_write_byte_data(client, PPKB_SYS_CONFIG, val);
+ if (ret) {
+ dev_err(dev, "Failed to write config: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ppkb_open(struct input_dev *input)
+{
+ struct i2c_client *client = input_get_drvdata(input);
+ int error;
+
+ error = ppkb_set_scan(client, true);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static void ppkb_close(struct input_dev *input)
+{
+ struct i2c_client *client = input_get_drvdata(input);
+
+ ppkb_set_scan(client, false);
+}
+
+static void ppkb_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
+static int ppkb_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ unsigned int phys_rows, phys_cols;
+ struct pinephone_keyboard *ppkb;
+ struct regulator *vbat_supply;
+ u8 info[PPKB_MATRIX_SIZE + 1];
+ struct device_node *i2c_bus;
+ int ret;
+ int error;
+
+ vbat_supply = devm_regulator_get(dev, "vbat");
+ error = PTR_ERR_OR_ZERO(vbat_supply);
+ if (error) {
+ dev_err(dev, "Failed to get VBAT supply: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(vbat_supply);
+ if (error) {
+ dev_err(dev, "Failed to enable VBAT: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(dev, ppkb_regulator_disable,
+ vbat_supply);
+ if (error)
+ return error;
+
+ ret = i2c_smbus_read_i2c_block_data(client, 0, sizeof(info), info);
+ if (ret != sizeof(info)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(dev, "Failed to read device ID: %d\n", error);
+ return error;
+ }
+
+ if (info[PPKB_DEVICE_ID_HI] != PPKB_DEVICE_ID_HI_VALUE ||
+ info[PPKB_DEVICE_ID_LO] != PPKB_DEVICE_ID_LO_VALUE) {
+ dev_warn(dev, "Unexpected device ID: %#02x %#02x\n",
+ info[PPKB_DEVICE_ID_HI], info[PPKB_DEVICE_ID_LO]);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Found firmware version %d.%d features %#x\n",
+ info[PPKB_FW_REVISION] >> 4,
+ info[PPKB_FW_REVISION] & 0xf,
+ info[PPKB_FW_FEATURES]);
+
+ phys_rows = info[PPKB_MATRIX_SIZE] & 0xf;
+ phys_cols = info[PPKB_MATRIX_SIZE] >> 4;
+ if (phys_rows != PPKB_ROWS || phys_cols != PPKB_COLS) {
+ dev_err(dev, "Unexpected keyboard size %ux%u\n",
+ phys_rows, phys_cols);
+ return -EINVAL;
+ }
+
+ /* Disable scan by default to save power. */
+ error = ppkb_set_scan(client, false);
+ if (error)
+ return error;
+
+ ppkb = devm_kzalloc(dev, sizeof(*ppkb), GFP_KERNEL);
+ if (!ppkb)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ppkb);
+
+ i2c_bus = of_get_child_by_name(dev->of_node, "i2c");
+ if (i2c_bus) {
+ ppkb->adapter.owner = THIS_MODULE;
+ ppkb->adapter.algo = &ppkb_adap_algo;
+ ppkb->adapter.algo_data = client;
+ ppkb->adapter.dev.parent = dev;
+ ppkb->adapter.dev.of_node = i2c_bus;
+ strscpy(ppkb->adapter.name, DRV_NAME, sizeof(ppkb->adapter.name));
+
+ error = devm_i2c_add_adapter(dev, &ppkb->adapter);
+ if (error) {
+ dev_err(dev, "Failed to add I2C adapter: %d\n", error);
+ return error;
+ }
+ }
+
+ crc8_populate_msb(ppkb->crc_table, PPKB_CRC8_POLYNOMIAL);
+
+ ppkb->input = devm_input_allocate_device(dev);
+ if (!ppkb->input)
+ return -ENOMEM;
+
+ input_set_drvdata(ppkb->input, client);
+
+ ppkb->input->name = "PinePhone Keyboard";
+ ppkb->input->phys = DRV_NAME "/input0";
+ ppkb->input->id.bustype = BUS_I2C;
+ ppkb->input->open = ppkb_open;
+ ppkb->input->close = ppkb_close;
+
+ input_set_capability(ppkb->input, EV_MSC, MSC_SCAN);
+ __set_bit(EV_REP, ppkb->input->evbit);
+
+ error = matrix_keypad_build_keymap(&ppkb_keymap_data, NULL,
+ 2 * PPKB_ROWS, PPKB_COLS, NULL,
+ ppkb->input);
+ if (error) {
+ dev_err(dev, "Failed to build keymap: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(ppkb->input);
+ if (error) {
+ dev_err(dev, "Failed to register input: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, ppkb_irq_thread,
+ IRQF_ONESHOT, client->name, client);
+ if (error) {
+ dev_err(dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ppkb_of_match[] = {
+ { .compatible = "pine64,pinephone-keyboard" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ppkb_of_match);
+
+static struct i2c_driver ppkb_driver = {
+ .probe_new = ppkb_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ppkb_of_match,
+ },
+};
+module_i2c_driver(ppkb_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Pine64 PinePhone keyboard driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index a045d61165ac..a62bb8fff88c 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -8,12 +8,14 @@
* Based on sh_keysc.c, copyright 2008 Magnus Damm
*/
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/io.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#define ST_KEYSCAN_MAXKEYS 16
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index a4977193dd4a..56e784936059 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -10,9 +10,6 @@
* by Justin Cormack
*/
-/*
- */
-
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index d450f11b98a7..b123a208ef36 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -7,9 +7,6 @@
* Sun keyboard driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 89b9575dc75d..78e55318ccd6 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -70,7 +70,7 @@
#define TC3589x_KBD_INT_CLR 0x1
/**
- * struct tc35893_keypad_platform_data - platform specific keypad data
+ * struct tc3589x_keypad_platform_data - platform specific keypad data
* @keymap_data: matrix scan code table for keycodes
* @krow: mask for available rows, value is 0xFF
* @kcol: mask for available columns, value is 0xFF
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 280796df679a..c9d7c2481726 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -7,9 +7,6 @@
* XT keyboard driver for Linux
*/
-/*
- */
-
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index a18ab7358d8f..9f088900f863 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -730,6 +730,24 @@ config INPUT_ADXL34X_SPI
To compile this driver as a module, choose M here: the
module will be called adxl34x-spi.
+config INPUT_IBM_PANEL
+ tristate "IBM Operation Panel driver"
+ depends on I2C && I2C_SLAVE
+ help
+ Say Y here if you have an IBM Operation Panel connected to your system
+ over I2C. The panel is typically connected only to a system's service
+ processor (BMC).
+
+ The Operation Panel is a controller with some buttons and an LCD
+ display that allows someone with physical access to the system to
+ perform various administrative tasks. This driver only supports the
+ part of the controller that sends commands to the system.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ibm-panel.
+
+ If unsure, say N.
+
config INPUT_IMS_PCU
tristate "IMS Passenger Control Unit driver"
depends on USB
@@ -891,6 +909,15 @@ config INPUT_SC27XX_VIBRA
To compile this driver as a module, choose M here. The module will
be called sc27xx_vibra.
+config INPUT_RT5120_PWRKEY
+ tristate "RT5120 PMIC power key support"
+ depends on MFD_RT5120 || COMPILE_TEST
+ help
+ This enables support for the Richtek RT5120 PMIC power key driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rt5120-pwrkey.
+
config INPUT_STPMIC1_ONKEY
tristate "STPMIC1 PMIC Onkey support"
depends on MFD_STPMIC1
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 28dfc444f0a9..6abefc41037b 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o
obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o
obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
+obj-$(CONFIG_INPUT_IBM_PANEL) += ibm-panel.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o
@@ -69,6 +70,7 @@ obj-$(CONFIG_INPUT_RAVE_SP_PWRBUTTON) += rave-sp-pwrbutton.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o
obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
+obj-$(CONFIG_INPUT_RT5120_PWRKEY) += rt5120-pwrkey.o
obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
diff --git a/drivers/input/misc/ibm-panel.c b/drivers/input/misc/ibm-panel.c
new file mode 100644
index 000000000000..a8fba0054719
--- /dev/null
+++ b/drivers/input/misc/ibm-panel.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) IBM Corporation 2020
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+#define DEVICE_NAME "ibm-panel"
+#define PANEL_KEYCODES_COUNT 3
+
+struct ibm_panel {
+ u8 idx;
+ u8 command[11];
+ u32 keycodes[PANEL_KEYCODES_COUNT];
+ spinlock_t lock; /* protects writes to idx and command */
+ struct input_dev *input;
+};
+
+static u8 ibm_panel_calculate_checksum(struct ibm_panel *panel)
+{
+ u8 chksum;
+ u16 sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < sizeof(panel->command) - 1; ++i) {
+ sum += panel->command[i];
+ if (sum & 0xff00) {
+ sum &= 0xff;
+ sum++;
+ }
+ }
+
+ chksum = sum & 0xff;
+ chksum = ~chksum;
+ chksum++;
+
+ return chksum;
+}
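
The checksum above is a 16-bit sum of the first ten command bytes with end-around carry, then negated in two's complement. A runnable userspace sketch (the sample command bytes are invented for illustration):

#include <stdio.h>

static unsigned char panel_checksum(const unsigned char *cmd, unsigned int len)
{
	unsigned short sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++) {
		sum += cmd[i];
		if (sum & 0xff00) {	/* fold the carry back in */
			sum &= 0xff;
			sum++;
		}
	}
	return (unsigned char)(~(sum & 0xff) + 1);
}

int main(void)
{
	unsigned char cmd[11] = { 0xff, 0xf0, 0x01 };	/* hypothetical command */

	cmd[10] = panel_checksum(cmd, 10);
	printf("checksum byte: 0x%02x\n", cmd[10]);
	return 0;
}

A well-formed command then satisfies panel_checksum(cmd, 10) == cmd[10], which is exactly the comparison ibm_panel_process_command() makes below.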
+
+static void ibm_panel_process_command(struct ibm_panel *panel)
+{
+ u8 button;
+ u8 chksum;
+
+ if (panel->command[0] != 0xff || panel->command[1] != 0xf0) {
+ dev_dbg(&panel->input->dev, "command invalid: %02x %02x\n",
+ panel->command[0], panel->command[1]);
+ return;
+ }
+
+ chksum = ibm_panel_calculate_checksum(panel);
+ if (chksum != panel->command[sizeof(panel->command) - 1]) {
+ dev_dbg(&panel->input->dev,
+ "command failed checksum: %u != %u\n", chksum,
+ panel->command[sizeof(panel->command) - 1]);
+ return;
+ }
+
+ button = panel->command[2] & 0xf;
+ if (button < PANEL_KEYCODES_COUNT) {
+ input_report_key(panel->input, panel->keycodes[button],
+ !(panel->command[2] & 0x80));
+ input_sync(panel->input);
+ } else {
+ dev_dbg(&panel->input->dev, "unknown button %u\n",
+ button);
+ }
+}
+
+static int ibm_panel_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ unsigned long flags;
+ struct ibm_panel *panel = i2c_get_clientdata(client);
+
+ dev_dbg(&panel->input->dev, "event: %u data: %02x\n", event, *val);
+
+ spin_lock_irqsave(&panel->lock, flags);
+
+ switch (event) {
+ case I2C_SLAVE_STOP:
+ if (panel->idx == sizeof(panel->command))
+ ibm_panel_process_command(panel);
+ else
+ dev_dbg(&panel->input->dev,
+ "command incorrect size %u\n", panel->idx);
+ fallthrough;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ panel->idx = 0;
+ break;
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (panel->idx < sizeof(panel->command))
+ panel->command[panel->idx++] = *val;
+ else
+ /*
+ * The command is too long and therefore invalid, so set the index
+ * to its largest possible value. When a STOP is finally received,
+ * the command will be rejected upon processing.
+ */
+ panel->idx = U8_MAX;
+ break;
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ *val = 0xff;
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&panel->lock, flags);
+
+ return 0;
+}
+
+static int ibm_panel_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ibm_panel *panel;
+ int i;
+ int error;
+
+ panel = devm_kzalloc(&client->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ spin_lock_init(&panel->lock);
+
+ panel->input = devm_input_allocate_device(&client->dev);
+ if (!panel->input)
+ return -ENOMEM;
+
+ panel->input->name = client->name;
+ panel->input->id.bustype = BUS_I2C;
+
+ error = device_property_read_u32_array(&client->dev,
+ "linux,keycodes",
+ panel->keycodes,
+ PANEL_KEYCODES_COUNT);
+ if (error) {
+ /*
+ * Use gamepad buttons as defaults for compatibility with
+ * existing applications.
+ */
+ panel->keycodes[0] = BTN_NORTH;
+ panel->keycodes[1] = BTN_SOUTH;
+ panel->keycodes[2] = BTN_SELECT;
+ }
+
+ for (i = 0; i < PANEL_KEYCODES_COUNT; ++i)
+ input_set_capability(panel->input, EV_KEY, panel->keycodes[i]);
+
+ error = input_register_device(panel->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ i2c_set_clientdata(client, panel);
+ error = i2c_slave_register(client, ibm_panel_i2c_slave_cb);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register as i2c slave: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void ibm_panel_remove(struct i2c_client *client)
+{
+ i2c_slave_unregister(client);
+}
+
+static const struct of_device_id ibm_panel_match[] = {
+ { .compatible = "ibm,op-panel" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ibm_panel_match);
+
+static struct i2c_driver ibm_panel_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = ibm_panel_match,
+ },
+ .probe = ibm_panel_probe,
+ .remove = ibm_panel_remove,
+};
+module_i2c_driver(ibm_panel_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("IBM Operation Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6f38aa23a1ff..b2f1292e27ef 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -744,7 +744,7 @@ static int ims_pcu_switch_to_bootloader(struct ims_pcu *pcu)
error = ims_pcu_execute_command(pcu, JUMP_TO_BTLDR, NULL, 0);
if (error) {
dev_err(pcu->dev,
- "Failure when sending JUMP TO BOOLTLOADER command, error: %d\n",
+ "Failure when sending JUMP TO BOOTLOADER command, error: %d\n",
error);
return error;
}
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index b2e8097a2e6d..ddb863bf63ee 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -1077,7 +1077,7 @@ static int iqs7222_hard_reset(struct iqs7222_private *iqs7222)
static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
{
- u8 msg_buf[] = { 0xFF, 0x00, };
+ u8 msg_buf[] = { 0xFF, };
int ret;
/*
@@ -1771,11 +1771,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
if (!chan_node)
return 0;
- if (dev_desc->allow_offset) {
- sys_setup[dev_desc->allow_offset] |= BIT(chan_index);
- if (fwnode_property_present(chan_node, "azoteq,ulp-allow"))
- sys_setup[dev_desc->allow_offset] &= ~BIT(chan_index);
- }
+ if (dev_desc->allow_offset &&
+ fwnode_property_present(chan_node, "azoteq,ulp-allow"))
+ sys_setup[dev_desc->allow_offset] &= ~BIT(chan_index);
chan_setup[0] |= IQS7222_CHAN_SETUP_0_CHAN_EN;
@@ -2206,6 +2204,9 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
u16 *sys_setup = iqs7222->sys_setup;
int error, i;
+ if (dev_desc->allow_offset)
+ sys_setup[dev_desc->allow_offset] = U16_MAX;
+
if (dev_desc->event_offset)
sys_setup[dev_desc->event_offset] = IQS7222_EVENT_MASK_ATI;
@@ -2326,6 +2327,9 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
int k = 2 + j * (num_chan > 16 ? 2 : 1);
u16 state = le16_to_cpu(status[k + i / 16]);
+ if (!iqs7222->kp_type[i][j])
+ continue;
+
input_event(iqs7222->keypad,
iqs7222->kp_type[i][j],
iqs7222->kp_code[i][j],
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 4650f4a94989..bee4b1376491 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -485,7 +485,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
}
if (udev->manufacturer)
- strlcpy(remote->name, udev->manufacturer, sizeof(remote->name));
+ strscpy(remote->name, udev->manufacturer, sizeof(remote->name));
if (udev->product) {
if (udev->manufacturer)
diff --git a/drivers/input/misc/rt5120-pwrkey.c b/drivers/input/misc/rt5120-pwrkey.c
new file mode 100644
index 000000000000..8a8c1aeeed05
--- /dev/null
+++ b/drivers/input/misc/rt5120-pwrkey.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define RT5120_REG_INTSTAT 0x1E
+#define RT5120_PWRKEYSTAT_MASK BIT(7)
+
+struct rt5120_priv {
+ struct regmap *regmap;
+ struct input_dev *input;
+};
+
+static irqreturn_t rt5120_pwrkey_handler(int irq, void *devid)
+{
+ struct rt5120_priv *priv = devid;
+ unsigned int stat;
+ int error;
+
+ error = regmap_read(priv->regmap, RT5120_REG_INTSTAT, &stat);
+ if (error)
+ return IRQ_NONE;
+
+ input_report_key(priv->input, KEY_POWER,
+ !(stat & RT5120_PWRKEYSTAT_MASK));
+ input_sync(priv->input);
+
+ return IRQ_HANDLED;
+}
+
+static int rt5120_pwrkey_probe(struct platform_device *pdev)
+{
+ struct rt5120_priv *priv;
+ struct device *dev = &pdev->dev;
+ int press_irq, release_irq;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->regmap) {
+ dev_err(dev, "Failed to init regmap\n");
+ return -ENODEV;
+ }
+
+ press_irq = platform_get_irq_byname(pdev, "pwrkey-press");
+ if (press_irq < 0)
+ return press_irq;
+
+ release_irq = platform_get_irq_byname(pdev, "pwrkey-release");
+ if (release_irq < 0)
+ return release_irq;
+
+ /* Make the input device device-resource managed */
+ priv->input = devm_input_allocate_device(dev);
+ if (!priv->input)
+ return -ENOMEM;
+
+ priv->input->name = "rt5120_pwrkey";
+ priv->input->phys = "rt5120_pwrkey/input0";
+ priv->input->id.bustype = BUS_I2C;
+ input_set_capability(priv->input, EV_KEY, KEY_POWER);
+
+ error = input_register_device(priv->input);
+ if (error) {
+ dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, press_irq,
+ NULL, rt5120_pwrkey_handler,
+ 0, "pwrkey-press", priv);
+ if (error) {
+ dev_err(dev,
+ "Failed to register pwrkey press irq: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, release_irq,
+ NULL, rt5120_pwrkey_handler,
+ 0, "pwrkey-release", priv);
+ if (error) {
+ dev_err(dev,
+ "Failed to register pwrkey release irq: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id r5120_pwrkey_match_table[] = {
+ { .compatible = "richtek,rt5120-pwrkey" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, r5120_pwrkey_match_table);
+
+static struct platform_driver rt5120_pwrkey_driver = {
+ .driver = {
+ .name = "rt5120-pwrkey",
+ .of_match_table = r5120_pwrkey_match_table,
+ },
+ .probe = rt5120_pwrkey_probe,
+};
+module_platform_driver(rt5120_pwrkey_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT5120 power key driver");
+MODULE_LICENSE("GPL");
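
Both the press and the release interrupt above funnel into the same handler, which derives the key state from the level of the status bit rather than from which IRQ fired: the key reads as pressed while RT5120_PWRKEYSTAT_MASK is clear. A trivial userspace sketch of that decode (illustration only):

#include <stdio.h>

#define PWRKEYSTAT_MASK	(1u << 7)	/* mirrors RT5120_PWRKEYSTAT_MASK */

static int pwrkey_pressed(unsigned int stat)
{
	return !(stat & PWRKEYSTAT_MASK);	/* active-low status bit */
}

int main(void)
{
	printf("stat 0x00 -> pressed=%d\n", pwrkey_pressed(0x00));
	printf("stat 0x80 -> pressed=%d\n", pwrkey_pressed(0x80));
	return 0;
}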
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index b307cca17022..e3ee0638ffba 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -26,6 +26,7 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mfd/twl.h>
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index e0ff616fb857..5619996da86f 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -163,14 +163,10 @@ static int __maybe_unused twl4030_vibra_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
-static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
- struct device_node *parent)
+static bool twl4030_vibra_check_coexist(struct device_node *parent)
{
struct device_node *node;
- if (pdata && pdata->coexist)
- return true;
-
node = of_get_child_by_name(parent, "codec");
if (node) {
of_node_put(node);
@@ -182,13 +178,12 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
static int twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_vibra_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
struct vibra_info *info;
int ret;
- if (!pdata && !twl4030_core_node) {
- dev_dbg(&pdev->dev, "platform_data not available\n");
+ if (!twl4030_core_node) {
+ dev_dbg(&pdev->dev, "twl4030 OF node is missing\n");
return -EINVAL;
}
@@ -197,7 +192,7 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
return -ENOMEM;
info->dev = &pdev->dev;
- info->coexist = twl4030_vibra_check_coexist(pdata, twl4030_core_node);
+ info->coexist = twl4030_vibra_check_coexist(twl4030_core_node);
INIT_WORK(&info->play_work, vibra_play_work);
info->input_dev = devm_input_allocate_device(&pdev->dev);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index e1758d5ffe42..d4eb59b55bf1 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1311,12 +1311,6 @@ static int elan_probe(struct i2c_client *client,
return error;
}
- error = devm_device_add_groups(dev, elan_sysfs_groups);
- if (error) {
- dev_err(dev, "failed to create sysfs attributes: %d\n", error);
- return error;
- }
-
error = input_register_device(data->input);
if (error) {
dev_err(dev, "failed to register input device: %d\n", error);
@@ -1442,6 +1436,7 @@ static struct i2c_driver elan_driver = {
.acpi_match_table = ACPI_PTR(elan_acpi_id),
.of_match_table = of_match_ptr(elan_of_match),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .dev_groups = elan_sysfs_groups,
},
.probe = elan_probe,
.id_table = elan_id,
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 4dc441309aac..3c8310da0b05 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -884,7 +884,7 @@ static ssize_t hgpk_trigger_recal(struct psmouse *psmouse, void *data,
/*
* We queue work instead of doing recalibration right here
- * to avoid adding locking to to hgpk_force_recalibrate()
+ * to avoid adding locking to hgpk_force_recalibrate()
* since workqueue provides serialization.
*/
psmouse_queue_work(psmouse, &priv->recalib_wq, 0);
@@ -1057,7 +1057,7 @@ void hgpk_module_init(void)
strlen(hgpk_mode_name));
if (hgpk_default_mode == HGPK_MODE_INVALID) {
hgpk_default_mode = HGPK_MODE_MOUSE;
- strlcpy(hgpk_mode_name, hgpk_mode_names[HGPK_MODE_MOUSE],
+ strscpy(hgpk_mode_name, hgpk_mode_names[HGPK_MODE_MOUSE],
sizeof(hgpk_mode_name));
}
}
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index df5d1160478c..401d8bff8e84 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -13,9 +13,6 @@
* Inport (ATI XL and Microsoft) busmouse driver for Linux
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index bd647f9f505a..0aab63dbc30a 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -14,9 +14,6 @@
* Logitech Bus Mouse Driver for Linux
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index f75574766b85..efa58049f746 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -10,9 +10,6 @@
* IBM PC110 touchpad driver for Linux
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 0b4a3039f312..c9a7e87b273e 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -94,7 +94,7 @@ PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO,
(void *) offsetof(struct psmouse, resync_time),
psmouse_show_int_attr, psmouse_set_int_attr);
-static struct attribute *psmouse_attributes[] = {
+static struct attribute *psmouse_dev_attrs[] = {
&psmouse_attr_protocol.dattr.attr,
&psmouse_attr_rate.dattr.attr,
&psmouse_attr_resolution.dattr.attr,
@@ -103,9 +103,7 @@ static struct attribute *psmouse_attributes[] = {
NULL
};
-static const struct attribute_group psmouse_attribute_group = {
- .attrs = psmouse_attributes,
-};
+ATTRIBUTE_GROUPS(psmouse_dev);
/*
* psmouse_mutex protects all operations changing state of mouse
@@ -1481,8 +1479,6 @@ static void psmouse_disconnect(struct serio *serio)
struct psmouse *psmouse = serio_get_drvdata(serio);
struct psmouse *parent = NULL;
- sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);
-
mutex_lock(&psmouse_mutex);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
@@ -1647,10 +1643,6 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
if (parent && parent->pt_activate)
parent->pt_activate(parent);
- error = sysfs_create_group(&serio->dev.kobj, &psmouse_attribute_group);
- if (error)
- goto err_pt_deactivate;
-
/*
* PS/2 devices having SMBus companions should stay disabled
* on PS/2 side, in order to have SMBus part operable.
@@ -1666,13 +1658,6 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
mutex_unlock(&psmouse_mutex);
return retval;
- err_pt_deactivate:
- if (parent && parent->pt_deactivate)
- parent->pt_deactivate(parent);
- if (input_dev) {
- input_unregister_device(input_dev);
- input_dev = NULL; /* so we don't try to free it below */
- }
err_protocol_disconnect:
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
@@ -1791,7 +1776,8 @@ MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
static struct serio_driver psmouse_drv = {
.driver = {
- .name = "psmouse",
+ .name = "psmouse",
+ .dev_groups = psmouse_dev_groups,
},
.description = DRIVER_DESC,
.id_table = psmouse_serio_ids,
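
The psmouse conversion above (like the elan_i2c one earlier) moves sysfs attribute handling out of connect()/disconnect() and into the driver core via .dev_groups, so the attributes exist before the device is announced to userspace and are torn down automatically. ATTRIBUTE_GROUPS(psmouse_dev) expands roughly as follows (an abbreviated sketch of the <linux/sysfs.h> macro, not a literal copy):

static const struct attribute_group psmouse_dev_group = {
	.attrs = psmouse_dev_attrs,
};

static const struct attribute_group *psmouse_dev_groups[] = {
	&psmouse_dev_group,
	NULL,
};

The generated psmouse_dev_groups pointer array is what .dev_groups in the serio_driver above consumes.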
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index caa79c177c55..993f90333380 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -7,9 +7,6 @@
* Serial mouse driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ffad142801b3..fa021af8506e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0099", /* X1 Extreme Gen 1 / P1 Gen 1 */
"LEN009b", /* T580 */
"LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
+ "LEN040f", /* P1 Gen 3 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
"LEN2054", /* E480 */
@@ -714,8 +715,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
}
serio->id.type = SERIO_PS_PSTHRU;
- strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
- strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->phys));
+ strscpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
+ strscpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->phys));
serio->write = synaptics_pt_write;
serio->start = synaptics_pt_start;
serio->stop = synaptics_pt_stop;
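
These strlcpy() to strscpy() hunks, here and in the surrounding files, are part of a tree-wide cleanup: strscpy() always NUL-terminates, never reads past the source buffer's NUL, and returns the number of characters copied or -E2BIG on truncation, whereas strlcpy() returned strlen(src) and could over-read an unterminated source. A kernel-style sketch of the return convention (assuming an 8-byte destination):

	char name[8];
	ssize_t n;

	n = strscpy(name, "Synaptics pass-through", sizeof(name));
	if (n == -E2BIG)
		pr_debug("truncated: name now holds \"Synapti\"\n");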
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c
index b5ff27e32a0c..75e45f3ae675 100644
--- a/drivers/input/mouse/synaptics_usb.c
+++ b/drivers/input/mouse/synaptics_usb.c
@@ -354,7 +354,7 @@ static int synusb_probe(struct usb_interface *intf,
synusb->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (udev->manufacturer)
- strlcpy(synusb->name, udev->manufacturer,
+ strscpy(synusb->name, udev->manufacturer,
sizeof(synusb->name));
if (udev->product) {
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index bd415f4b574e..8af8e4a15f95 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -13,9 +13,6 @@
*/
/*
- */
-
-/*
* Building an adaptor to DE9 / DB25 RS232
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -138,12 +135,12 @@ static void vsxxxaa_detection_done(struct vsxxxaa *mouse)
{
switch (mouse->type) {
case 0x02:
- strlcpy(mouse->name, "DEC VSXXX-AA/-GA mouse",
+ strscpy(mouse->name, "DEC VSXXX-AA/-GA mouse",
sizeof(mouse->name));
break;
case 0x04:
- strlcpy(mouse->name, "DEC VSXXX-AB digitizer",
+ strscpy(mouse->name, "DEC VSXXX-AB digitizer",
sizeof(mouse->name));
break;
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index c194b1664b10..1e11ea30d7bd 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -181,7 +181,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
serio->close = rmi_f03_pt_close;
serio->port_data = f03;
- strlcpy(serio->name, "RMI4 PS/2 pass-through", sizeof(serio->name));
+ strscpy(serio->name, "RMI4 PS/2 pass-through", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0",
dev_name(&f03->fn->dev));
serio->dev.parent = &f03->fn->dev;
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index e5dca9868f87..0d9a5756e3f5 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -114,13 +114,13 @@ static irqreturn_t rmi_f34_attention(int irq, void *ctx)
complete(&f34->v5.cmd_done);
} else {
ret = rmi_read_block(f34->fn->rmi_dev,
- f34->fn->fd.data_base_addr +
- f34->v7.off.flash_status,
- &status, sizeof(status));
- rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: status: %#02x, ret: %d\n",
+ f34->fn->fd.data_base_addr +
+ V7_COMMAND_OFFSET,
+ &status, sizeof(status));
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: cmd: %#02x, ret: %d\n",
__func__, status, ret);
- if (!ret && !(status & 0x1f))
+ if (!ret && status == CMD_V7_IDLE)
complete(&f34->v7.cmd_done);
}
@@ -321,13 +321,13 @@ static ssize_t rmi_driver_bootloader_id_show(struct device *dev,
f34 = dev_get_drvdata(&fn->dev);
if (f34->bl_version == 5)
- return scnprintf(buf, PAGE_SIZE, "%c%c\n",
- f34->bootloader_id[0],
- f34->bootloader_id[1]);
+ return sysfs_emit(buf, "%c%c\n",
+ f34->bootloader_id[0],
+ f34->bootloader_id[1]);
else
- return scnprintf(buf, PAGE_SIZE, "V%d.%d\n",
- f34->bootloader_id[1],
- f34->bootloader_id[0]);
+ return sysfs_emit(buf, "V%d.%d\n",
+ f34->bootloader_id[1],
+ f34->bootloader_id[0]);
}
return 0;
@@ -346,7 +346,7 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev,
if (fn) {
f34 = dev_get_drvdata(&fn->dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", f34->configuration_id);
+ return sysfs_emit(buf, "%s\n", f34->configuration_id);
}
return 0;
@@ -370,7 +370,7 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
f34 = dev_get_drvdata(&data->f34_container->dev);
- if (f34->bl_version == 7) {
+ if (f34->bl_version >= 7) {
if (data->pdt_props & HAS_BSR) {
dev_err(dev, "%s: LTS not supported\n", __func__);
return -ENODEV;
@@ -382,7 +382,7 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
}
/* Enter flash mode */
- if (f34->bl_version == 7)
+ if (f34->bl_version >= 7)
ret = rmi_f34v7_start_reflash(f34, fw);
else
ret = rmi_f34_enable_flash(f34);
@@ -413,7 +413,7 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
f34 = dev_get_drvdata(&data->f34_container->dev);
/* Perform firmware update */
- if (f34->bl_version == 7)
+ if (f34->bl_version >= 7)
ret = rmi_f34v7_do_reflash(f34, fw);
else
ret = rmi_f34_update_firmware(f34, fw);
@@ -499,7 +499,7 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev,
if (data->f34_container)
update_status = rmi_f34_status(data->f34_container);
- return scnprintf(buf, PAGE_SIZE, "%d\n", update_status);
+ return sysfs_emit(buf, "%d\n", update_status);
}
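
sysfs_emit(), used in the three hunks above, is the preferred replacement for scnprintf() in sysfs show() callbacks: it verifies that buf is the page-aligned buffer sysfs handed in and clamps output to PAGE_SIZE. A minimal sketch with a hypothetical read-only attribute ("example" is a made-up name):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);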
static DEVICE_ATTR(update_fw_status, 0444,
diff --git a/drivers/input/rmi4/rmi_f34.h b/drivers/input/rmi4/rmi_f34.h
index 99faa8c2269d..cfa3039804fd 100644
--- a/drivers/input/rmi4/rmi_f34.h
+++ b/drivers/input/rmi4/rmi_f34.h
@@ -222,20 +222,6 @@ struct image_metadata {
struct physical_address phyaddr;
};
-struct register_offset {
- u8 properties;
- u8 properties_2;
- u8 block_size;
- u8 block_count;
- u8 gc_block_count;
- u8 flash_status;
- u8 partition_id;
- u8 block_number;
- u8 transfer_length;
- u8 flash_cmd;
- u8 payload;
-};
-
struct rmi_f34_firmware {
__le32 checksum;
u8 pad1[3];
@@ -262,7 +248,6 @@ struct f34v5_data {
struct f34v7_data {
bool has_display_cfg;
bool has_guest_code;
- bool force_update;
bool in_bl_mode;
u8 *read_config_buf;
size_t read_config_buf_size;
@@ -276,9 +261,7 @@ struct f34v7_data {
u16 payload_length;
u8 partitions;
u16 partition_table_bytes;
- bool new_partition_table;
- struct register_offset off;
struct block_count blkcount;
struct physical_address phyaddr;
struct image_metadata img;
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
index 8d7ec9d89b18..886557b01eba 100644
--- a/drivers/input/rmi4/rmi_f34v7.c
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -25,7 +25,7 @@ static int rmi_f34v7_read_flash_status(struct f34_data *f34)
int ret;
ret = rmi_read_block(f34->fn->rmi_dev,
- f34->fn->fd.data_base_addr + f34->v7.off.flash_status,
+ f34->fn->fd.data_base_addr + V7_FLASH_STATUS_OFFSET,
&status,
sizeof(status));
if (ret < 0) {
@@ -43,7 +43,7 @@ static int rmi_f34v7_read_flash_status(struct f34_data *f34)
}
ret = rmi_read_block(f34->fn->rmi_dev,
- f34->fn->fd.data_base_addr + f34->v7.off.flash_cmd,
+ f34->fn->fd.data_base_addr + V7_COMMAND_OFFSET,
&command,
sizeof(command));
if (ret < 0) {
@@ -72,6 +72,24 @@ static int rmi_f34v7_wait_for_idle(struct f34_data *f34, int timeout_ms)
return 0;
}
+static int rmi_f34v7_check_command_status(struct f34_data *f34, int timeout_ms)
+{
+ int ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, timeout_ms);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.flash_status != 0x00)
+ return -EIO;
+
+ return 0;
+}
+
static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
u8 cmd)
{
@@ -122,7 +140,7 @@ static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
data_1_5.payload[1] = f34->bootloader_id[1];
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.partition_id,
+ base + V7_PARTITION_ID_OFFSET,
&data_1_5, sizeof(data_1_5));
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -195,7 +213,7 @@ static int rmi_f34v7_write_command(struct f34_data *f34, u8 cmd)
__func__, command);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.flash_cmd,
+ base + V7_COMMAND_OFFSET,
&command, sizeof(command));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write flash command\n",
@@ -262,7 +280,7 @@ static int rmi_f34v7_write_partition_id(struct f34_data *f34, u8 cmd)
}
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.partition_id,
+ base + V7_PARTITION_ID_OFFSET,
&partition, sizeof(partition));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write partition ID\n",
@@ -290,7 +308,7 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.block_number,
+ base + V7_BLOCK_NUMBER_OFFSET,
&block_number, sizeof(block_number));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
@@ -301,7 +319,7 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
put_unaligned_le16(f34->v7.flash_config_length, &length);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.transfer_length,
+ base + V7_TRANSFER_LENGTH_OFFSET,
&length, sizeof(length));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write transfer length\n",
@@ -318,6 +336,10 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
return ret;
}
+ /*
+ * rmi_f34v7_check_command_status() can't be used here, as this
+ * function is called before IRQs are available
+ */
timeout = msecs_to_jiffies(F34_WRITE_WAIT_MS);
while (time_before(jiffies, timeout)) {
usleep_range(5000, 6000);
@@ -330,7 +352,7 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
}
ret = rmi_read_block(f34->fn->rmi_dev,
- base + f34->v7.off.payload,
+ base + V7_PAYLOAD_OFFSET,
f34->v7.read_config_buf,
f34->v7.partition_table_bytes);
if (ret < 0) {
@@ -504,13 +526,6 @@ static int rmi_f34v7_read_queries(struct f34_data *f34)
rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.block_size = %d\n",
__func__, f34->v7.block_size);
- f34->v7.off.flash_status = V7_FLASH_STATUS_OFFSET;
- f34->v7.off.partition_id = V7_PARTITION_ID_OFFSET;
- f34->v7.off.block_number = V7_BLOCK_NUMBER_OFFSET;
- f34->v7.off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
- f34->v7.off.flash_cmd = V7_COMMAND_OFFSET;
- f34->v7.off.payload = V7_PAYLOAD_OFFSET;
-
f34->v7.has_display_cfg = query_1_7.partition_support[1] & HAS_DISP_CFG;
f34->v7.has_guest_code =
query_1_7.partition_support[1] & HAS_GUEST_CODE;
@@ -571,68 +586,6 @@ static int rmi_f34v7_read_queries(struct f34_data *f34)
return 0;
}
-static int rmi_f34v7_check_ui_firmware_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.ui_firmware) {
- dev_err(&f34->fn->dev,
- "UI firmware size mismatch: %d != %d\n",
- block_count, f34->v7.blkcount.ui_firmware);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rmi_f34v7_check_ui_config_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.ui_config.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.ui_config) {
- dev_err(&f34->fn->dev, "UI config size mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rmi_f34v7_check_dp_config_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.dp_config.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.dp_config) {
- dev_err(&f34->fn->dev, "Display config size mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rmi_f34v7_check_guest_code_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.guest_code.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.guest_code) {
- dev_err(&f34->fn->dev, "Guest code size mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
{
u16 block_count;
@@ -648,58 +601,6 @@ static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
return 0;
}
-static int rmi_f34v7_erase_config(struct f34_data *f34)
-{
- int ret;
-
- dev_info(&f34->fn->dev, "Erasing config...\n");
-
- init_completion(&f34->v7.cmd_done);
-
- switch (f34->v7.config_area) {
- case v7_UI_CONFIG_AREA:
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_CONFIG);
- if (ret < 0)
- return ret;
- break;
- case v7_DP_CONFIG_AREA:
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_DISP_CONFIG);
- if (ret < 0)
- return ret;
- break;
- case v7_BL_CONFIG_AREA:
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_BL_CONFIG);
- if (ret < 0)
- return ret;
- break;
- }
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_ERASE_WAIT_MS);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int rmi_f34v7_erase_guest_code(struct f34_data *f34)
-{
- int ret;
-
- dev_info(&f34->fn->dev, "Erasing guest code...\n");
-
- init_completion(&f34->v7.cmd_done);
-
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_GUEST_CODE);
- if (ret < 0)
- return ret;
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_ERASE_WAIT_MS);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int rmi_f34v7_erase_all(struct f34_data *f34)
{
int ret;
@@ -708,32 +609,14 @@ static int rmi_f34v7_erase_all(struct f34_data *f34)
init_completion(&f34->v7.cmd_done);
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_FIRMWARE);
- if (ret < 0)
- return ret;
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_ERASE_WAIT_MS);
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_ALL);
if (ret < 0)
return ret;
- f34->v7.config_area = v7_UI_CONFIG_AREA;
- ret = rmi_f34v7_erase_config(f34);
+ ret = rmi_f34v7_check_command_status(f34, F34_ERASE_WAIT_MS);
if (ret < 0)
return ret;
- if (f34->v7.has_display_cfg) {
- f34->v7.config_area = v7_DP_CONFIG_AREA;
- ret = rmi_f34v7_erase_config(f34);
- if (ret < 0)
- return ret;
- }
-
- if (f34->v7.new_partition_table && f34->v7.has_guest_code) {
- ret = rmi_f34v7_erase_guest_code(f34);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -756,7 +639,7 @@ static int rmi_f34v7_read_blocks(struct f34_data *f34,
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.block_number,
+ base + V7_BLOCK_NUMBER_OFFSET,
&block_number, sizeof(block_number));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
@@ -772,7 +655,7 @@ static int rmi_f34v7_read_blocks(struct f34_data *f34,
put_unaligned_le16(transfer, &length);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.transfer_length,
+ base + V7_TRANSFER_LENGTH_OFFSET,
&length, sizeof(length));
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -787,12 +670,12 @@ static int rmi_f34v7_read_blocks(struct f34_data *f34,
if (ret < 0)
return ret;
- ret = rmi_f34v7_wait_for_idle(f34, F34_ENABLE_WAIT_MS);
+ ret = rmi_f34v7_check_command_status(f34, F34_ENABLE_WAIT_MS);
if (ret < 0)
return ret;
ret = rmi_read_block(f34->fn->rmi_dev,
- base + f34->v7.off.payload,
+ base + V7_PAYLOAD_OFFSET,
&f34->v7.read_config_buf[index],
transfer * f34->v7.block_size);
if (ret < 0) {
@@ -828,7 +711,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.block_number,
+ base + V7_BLOCK_NUMBER_OFFSET,
&block_number, sizeof(block_number));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
@@ -848,7 +731,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
init_completion(&f34->v7.cmd_done);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.transfer_length,
+ base + V7_TRANSFER_LENGTH_OFFSET,
&length, sizeof(length));
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -862,7 +745,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.payload,
+ base + V7_PAYLOAD_OFFSET,
block_ptr, transfer * f34->v7.block_size);
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -871,7 +754,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
return ret;
}
- ret = rmi_f34v7_wait_for_idle(f34, F34_ENABLE_WAIT_MS);
+ ret = rmi_f34v7_check_command_status(f34, F34_ENABLE_WAIT_MS);
if (ret < 0)
return ret;
@@ -937,17 +820,6 @@ static int rmi_f34v7_write_flash_config(struct f34_data *f34)
init_completion(&f34->v7.cmd_done);
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_FLASH_CONFIG);
- if (ret < 0)
- return ret;
-
- rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
- "%s: Erase flash config command written\n", __func__);
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_WRITE_WAIT_MS);
- if (ret < 0)
- return ret;
-
ret = rmi_f34v7_write_config(f34);
if (ret < 0)
return ret;
@@ -977,10 +849,6 @@ static int rmi_f34v7_write_partition_table(struct f34_data *f34)
if (ret < 0)
return ret;
- ret = rmi_f34v7_erase_config(f34);
- if (ret < 0)
- return ret;
-
ret = rmi_f34v7_write_flash_config(f34);
if (ret < 0)
return ret;
@@ -1007,33 +875,6 @@ static int rmi_f34v7_write_firmware(struct f34_data *f34)
blk_count, v7_CMD_WRITE_FW);
}
-static void rmi_f34v7_compare_partition_tables(struct f34_data *f34)
-{
- if (f34->v7.phyaddr.ui_firmware != f34->v7.img.phyaddr.ui_firmware) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- if (f34->v7.phyaddr.ui_config != f34->v7.img.phyaddr.ui_config) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- if (f34->v7.has_display_cfg &&
- f34->v7.phyaddr.dp_config != f34->v7.img.phyaddr.dp_config) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- if (f34->v7.has_guest_code &&
- f34->v7.phyaddr.guest_code != f34->v7.img.phyaddr.guest_code) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- f34->v7.new_partition_table = false;
-}
-
static void rmi_f34v7_parse_img_header_10_bl_container(struct f34_data *f34,
const void *image)
{
@@ -1180,8 +1021,6 @@ static int rmi_f34v7_parse_image_info(struct f34_data *f34)
rmi_f34v7_parse_partition_table(f34, f34->v7.img.fl_config.data,
&f34->v7.img.blkcount, &f34->v7.img.phyaddr);
- rmi_f34v7_compare_partition_tables(f34);
-
return 0;
}
@@ -1200,53 +1039,35 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
ret = rmi_f34v7_parse_image_info(f34);
if (ret < 0)
- goto fail;
-
- if (!f34->v7.new_partition_table) {
- ret = rmi_f34v7_check_ui_firmware_size(f34);
- if (ret < 0)
- goto fail;
-
- ret = rmi_f34v7_check_ui_config_size(f34);
- if (ret < 0)
- goto fail;
-
- if (f34->v7.has_display_cfg &&
- f34->v7.img.contains_display_cfg) {
- ret = rmi_f34v7_check_dp_config_size(f34);
- if (ret < 0)
- goto fail;
- }
+ return ret;
- if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
- ret = rmi_f34v7_check_guest_code_size(f34);
- if (ret < 0)
- goto fail;
- }
- } else {
- ret = rmi_f34v7_check_bl_config_size(f34);
- if (ret < 0)
- goto fail;
- }
+ ret = rmi_f34v7_check_bl_config_size(f34);
+ if (ret < 0)
+ return ret;
ret = rmi_f34v7_erase_all(f34);
if (ret < 0)
- goto fail;
+ return ret;
- if (f34->v7.new_partition_table) {
- ret = rmi_f34v7_write_partition_table(f34);
- if (ret < 0)
- goto fail;
- dev_info(&f34->fn->dev, "%s: Partition table programmed\n",
- __func__);
- }
+ ret = rmi_f34v7_write_partition_table(f34);
+ if (ret < 0)
+ return ret;
+ dev_info(&f34->fn->dev, "%s: Partition table programmed\n", __func__);
+
+ /*
+ * Reset to reload partition table - as the previous firmware has been
+ * erased, we remain in bootloader mode.
+ */
+ ret = rmi_scan_pdt(f34->fn->rmi_dev, NULL, rmi_initial_reset);
+ if (ret < 0)
+ dev_warn(&f34->fn->dev, "RMI reset failed!\n");
dev_info(&f34->fn->dev, "Writing firmware (%d bytes)...\n",
f34->v7.img.ui_firmware.size);
ret = rmi_f34v7_write_firmware(f34);
if (ret < 0)
- goto fail;
+ return ret;
dev_info(&f34->fn->dev, "Writing config (%d bytes)...\n",
f34->v7.img.ui_config.size);
@@ -1254,28 +1075,25 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
f34->v7.config_area = v7_UI_CONFIG_AREA;
ret = rmi_f34v7_write_ui_config(f34);
if (ret < 0)
- goto fail;
+ return ret;
if (f34->v7.has_display_cfg && f34->v7.img.contains_display_cfg) {
dev_info(&f34->fn->dev, "Writing display config...\n");
ret = rmi_f34v7_write_dp_config(f34);
if (ret < 0)
- goto fail;
+ return ret;
}
- if (f34->v7.new_partition_table) {
- if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
- dev_info(&f34->fn->dev, "Writing guest code...\n");
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ dev_info(&f34->fn->dev, "Writing guest code...\n");
- ret = rmi_f34v7_write_guest_code(f34);
- if (ret < 0)
- goto fail;
- }
+ ret = rmi_f34v7_write_guest_code(f34);
+ if (ret < 0)
+ return ret;
}
-fail:
- return ret;
+ return 0;
}
static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
@@ -1288,8 +1106,11 @@ static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
if (ret < 0)
return ret;
- if (f34->v7.in_bl_mode)
+ if (f34->v7.in_bl_mode) {
+ dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
+ __func__);
return 0;
+ }
init_completion(&f34->v7.cmd_done);
@@ -1297,7 +1118,7 @@ static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
if (ret < 0)
return ret;
- ret = rmi_f34v7_wait_for_idle(f34, F34_ENABLE_WAIT_MS);
+ ret = rmi_f34v7_check_command_status(f34, F34_ENABLE_WAIT_MS);
if (ret < 0)
return ret;
@@ -1308,39 +1129,16 @@ int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw)
{
int ret = 0;
- f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev, f34->fn->irq_mask);
-
f34->v7.config_area = v7_UI_CONFIG_AREA;
f34->v7.image = fw->data;
ret = rmi_f34v7_parse_image_info(f34);
if (ret < 0)
- goto exit;
-
- if (!f34->v7.force_update && f34->v7.new_partition_table) {
- dev_err(&f34->fn->dev, "%s: Partition table mismatch\n",
- __func__);
- ret = -EINVAL;
- goto exit;
- }
+ return ret;
dev_info(&f34->fn->dev, "Firmware image OK\n");
- ret = rmi_f34v7_read_flash_status(f34);
- if (ret < 0)
- goto exit;
-
- if (f34->v7.in_bl_mode) {
- dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
- __func__);
- }
-
- rmi_f34v7_enter_flash_prog(f34);
-
- return 0;
-
-exit:
- return ret;
+ return rmi_f34v7_enter_flash_prog(f34);
}
int rmi_f34v7_probe(struct f34_data *f34)
@@ -1384,6 +1182,5 @@ int rmi_f34v7_probe(struct f34_data *f34)
if (ret < 0)
return ret;
- f34->v7.force_update = true;
return 0;
}
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index c5ce907535ef..5c3da910b5b2 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -390,8 +390,8 @@ static int rmi_f54_vidioc_querycap(struct file *file, void *priv,
{
struct f54_data *f54 = video_drvdata(file);
- strlcpy(cap->driver, F54_NAME, sizeof(cap->driver));
- strlcpy(cap->card, SYNAPTICS_INPUT_DEVICE_NAME, sizeof(cap->card));
+ strscpy(cap->driver, F54_NAME, sizeof(cap->driver));
+ strscpy(cap->card, SYNAPTICS_INPUT_DEVICE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"rmi4:%s", dev_name(&f54->fn->dev));
@@ -410,7 +410,7 @@ static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
i->type = V4L2_INPUT_TYPE_TOUCH;
- strlcpy(i->name, rmi_f54_report_type_names[reptype], sizeof(i->name));
+ strscpy(i->name, rmi_f54_report_type_names[reptype], sizeof(i->name));
return 0;
}
@@ -696,7 +696,7 @@ static int rmi_f54_probe(struct rmi_function *fn)
rmi_f54_set_input(f54, 0);
/* register video device */
- strlcpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
+ strscpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
ret = v4l2_device_register(&fn->dev, &f54->v4l2);
if (ret) {
dev_err(&fn->dev, "Unable to register video dev.\n");
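
The strlcpy() -> strscpy() conversions in this and the following serio/tablet/touchscreen hunks are behavior-preserving for well-formed strings, but strscpy() is safer: it never reads the source beyond what fits in the destination, and its return value reports truncation directly. A minimal sketch, not part of the patch (helper name is illustrative):

#include <linux/errno.h>
#include <linux/string.h>

/*
 * strscpy() returns the number of characters copied (excluding the NUL)
 * or -E2BIG on truncation; the old strlcpy() returned strlen(src) and so
 * had to walk the entire source string even when only a few bytes fit.
 */
static int copy_name_checked(char *dst, size_t dst_size, const char *src)
{
	ssize_t len = strscpy(dst, src, dst_size);

	return len == -E2BIG ? -ENAMETOOLONG : 0;
}
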
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 379e9240c2b3..3a92304f64fb 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -110,8 +110,8 @@ static int altera_ps2_probe(struct platform_device *pdev)
serio->write = altera_ps2_write;
serio->open = altera_ps2_open;
serio->close = altera_ps2_close;
- strlcpy(serio->name, dev_name(&pdev->dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(&pdev->dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &pdev->dev;
ps2if->io = serio;
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 4408245b61d2..c391700fc4ae 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -126,8 +126,8 @@ static int amba_kmi_probe(struct amba_device *dev,
io->write = amba_kmi_write;
io->open = amba_kmi_open;
io->close = amba_kmi_close;
- strlcpy(io->name, dev_name(&dev->dev), sizeof(io->name));
- strlcpy(io->phys, dev_name(&dev->dev), sizeof(io->phys));
+ strscpy(io->name, dev_name(&dev->dev), sizeof(io->name));
+ strscpy(io->phys, dev_name(&dev->dev), sizeof(io->phys));
io->port_data = kmi;
io->dev.parent = &dev->dev;
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 1c0be299f179..ec93cb4573c3 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -159,8 +159,8 @@ static int ams_delta_serio_init(struct platform_device *pdev)
serio->id.type = SERIO_8042;
serio->open = ams_delta_serio_open;
serio->close = ams_delta_serio_close;
- strlcpy(serio->name, "AMS DELTA keyboard adapter", sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
+ strscpy(serio->name, "AMS DELTA keyboard adapter", sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
serio->dev.parent = &pdev->dev;
serio->port_data = priv;
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 974d7bfae0a0..9c9ce097f8bf 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -176,7 +176,7 @@ static int apbps2_of_probe(struct platform_device *ofdev)
priv->io->close = apbps2_close;
priv->io->write = apbps2_write;
priv->io->port_data = priv;
- strlcpy(priv->io->name, "APBPS2 PS/2", sizeof(priv->io->name));
+ strscpy(priv->io->name, "APBPS2 PS/2", sizeof(priv->io->name));
snprintf(priv->io->phys, sizeof(priv->io->phys),
"apbps2_%d", apbps2_idx++);
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index d45009d654bf..3da751f4a6bf 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -7,9 +7,6 @@
* 82C710 C&T mouse port chip driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ioport.h>
@@ -170,7 +167,7 @@ static int ct82c710_probe(struct platform_device *dev)
ct82c710_port->open = ct82c710_open;
ct82c710_port->close = ct82c710_close;
ct82c710_port->write = ct82c710_write;
- strlcpy(ct82c710_port->name, "C&T 82c710 mouse port",
+ strscpy(ct82c710_port->name, "C&T 82c710 mouse port",
sizeof(ct82c710_port->name));
snprintf(ct82c710_port->phys, sizeof(ct82c710_port->phys),
"isa%16llx/serio0", (unsigned long long)CT82C710_DATA);
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index da2c67cb8642..633c7de49d67 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -361,7 +361,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
snprintf(serio->name, sizeof(serio->name), "gsc-ps2-%s",
(ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
- strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
+ strscpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->id.type = SERIO_8042;
serio->write = gscps2_write;
serio->open = gscps2_open;
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 1a7b72a9016d..d62aefb2e245 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -334,9 +334,9 @@ static int hv_kbd_probe(struct hv_device *hv_dev,
hv_serio->dev.parent = &hv_dev->device;
hv_serio->id.type = SERIO_8042_XL;
hv_serio->port_data = kbd_dev;
- strlcpy(hv_serio->name, dev_name(&hv_dev->device),
+ strscpy(hv_serio->name, dev_name(&hv_dev->device),
sizeof(hv_serio->name));
- strlcpy(hv_serio->phys, dev_name(&hv_dev->device),
+ strscpy(hv_serio->phys, dev_name(&hv_dev->device),
sizeof(hv_serio->phys));
hv_serio->start = hv_kbd_start;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-acpipnpio.h
index 4fbec7bbecca..0778dc03cd9e 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _I8042_X86IA64IO_H
-#define _I8042_X86IA64IO_H
+#ifndef _I8042_ACPIPNPIO_H
+#define _I8042_ACPIPNPIO_H
+#include <linux/acpi.h>
#ifdef CONFIG_X86
#include <asm/x86_init.h>
@@ -1300,7 +1301,7 @@ static char i8042_pnp_aux_name[32];
static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
{
- strlcpy(dst, "PNP:", dst_size);
+ strscpy(dst, "PNP:", dst_size);
while (id) {
strlcat(dst, " ", dst_size);
@@ -1320,7 +1321,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
if (pnp_irq_valid(dev,0))
i8042_pnp_kbd_irq = pnp_irq(dev, 0);
- strlcpy(i8042_pnp_kbd_name, did->id, sizeof(i8042_pnp_kbd_name));
+ strscpy(i8042_pnp_kbd_name, did->id, sizeof(i8042_pnp_kbd_name));
if (strlen(pnp_dev_name(dev))) {
strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
@@ -1347,7 +1348,7 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
if (pnp_irq_valid(dev, 0))
i8042_pnp_aux_irq = pnp_irq(dev, 0);
- strlcpy(i8042_pnp_aux_name, did->id, sizeof(i8042_pnp_aux_name));
+ strscpy(i8042_pnp_aux_name, did->id, sizeof(i8042_pnp_aux_name));
if (strlen(pnp_dev_name(dev))) {
strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
@@ -1453,9 +1454,14 @@ static int __init i8042_pnp_init(void)
return -ENODEV;
#else
pr_info("PNP: No PS/2 controller found.\n");
+#if defined(__loongarch__)
+ if (acpi_disabled == 0)
+ return -ENODEV;
+#else
if (x86_platform.legacy.i8042 !=
X86_LEGACY_I8042_EXPECTED_PRESENT)
return -ENODEV;
+#endif
pr_info("Probing ports directly.\n");
return 0;
#endif
@@ -1665,4 +1671,4 @@ static inline void i8042_platform_exit(void)
i8042_pnp_exit();
}
-#endif /* _I8042_X86IA64IO_H */
+#endif /* _I8042_ACPIPNPIO_H */
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index fce76812843b..c712c1fe0605 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -3,6 +3,7 @@
#define _I8042_SPARCIO_H
#include <linux/of_device.h>
+#include <linux/types.h>
#include <asm/io.h>
#include <asm/oplib.h>
@@ -103,12 +104,25 @@ static struct platform_driver sparc_i8042_driver = {
.remove = sparc_i8042_remove,
};
-static int __init i8042_platform_init(void)
+static bool i8042_is_mr_coffee(void)
{
- struct device_node *root = of_find_node_by_path("/");
- const char *name = of_get_property(root, "name", NULL);
+ struct device_node *root;
+ const char *name;
+ bool is_mr_coffee;
+
+ root = of_find_node_by_path("/");
+
+ name = of_get_property(root, "name", NULL);
+ is_mr_coffee = name && !strcmp(name, "SUNW,JavaStation-1");
- if (name && !strcmp(name, "SUNW,JavaStation-1")) {
+ of_node_put(root);
+
+ return is_mr_coffee;
+}
+
+static int __init i8042_platform_init(void)
+{
+ if (i8042_is_mr_coffee()) {
/* Hardcoded values for MrCoffee. */
i8042_kbd_irq = i8042_aux_irq = 13 | 0x20;
kbd_iobase = ioremap(0x71300060, 8);
@@ -136,10 +150,7 @@ static int __init i8042_platform_init(void)
static inline void i8042_platform_exit(void)
{
- struct device_node *root = of_find_node_by_path("/");
- const char *name = of_get_property(root, "name", NULL);
-
- if (!name || strcmp(name, "SUNW,JavaStation-1"))
+ if (!i8042_is_mr_coffee())
platform_driver_unregister(&sparc_i8042_driver);
}
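
Besides deduplicating the JavaStation check, the new i8042_is_mr_coffee() helper fixes an of_node reference leak: of_find_node_by_path() returns its node with an elevated refcount that the old code never dropped. A hedged standalone sketch of the rule it follows (names illustrative):

#include <linux/of.h>
#include <linux/string.h>

/*
 * Every of_find_*() result must be balanced with of_node_put(), on
 * success and failure paths alike. of_get_property() tolerates a NULL
 * node, so the put can be done unconditionally.
 */
static bool root_node_is_named(const char *expected)
{
	struct device_node *root = of_find_node_by_path("/");
	const char *name = of_get_property(root, "name", NULL);
	bool match = name && !strcmp(name, expected);

	of_node_put(root);	/* drop the reference taken above */

	return match;
}
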
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 3fc0a89cc785..f9486495baef 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1341,9 +1341,9 @@ static int i8042_create_kbd_port(void)
serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
- strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
- strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
- strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
+ strscpy(serio->name, "i8042 KBD port", sizeof(serio->name));
+ strscpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
+ strscpy(serio->firmware_id, i8042_kbd_firmware_id,
sizeof(serio->firmware_id));
set_primary_fwnode(&serio->dev, i8042_kbd_fwnode);
@@ -1371,15 +1371,15 @@ static int i8042_create_aux_port(int idx)
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
if (idx < 0) {
- strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
- strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
- strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+ strscpy(serio->name, "i8042 AUX port", sizeof(serio->name));
+ strscpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
+ strscpy(serio->firmware_id, i8042_aux_firmware_id,
sizeof(serio->firmware_id));
serio->close = i8042_port_close;
} else {
snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
snprintf(serio->phys, sizeof(serio->phys), I8042_MUX_PHYS_DESC, idx + 1);
- strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+ strscpy(serio->firmware_id, i8042_aux_firmware_id,
sizeof(serio->firmware_id));
}
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index 55381783dc82..adb5173372d3 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -19,8 +19,8 @@
#include "i8042-snirm.h"
#elif defined(CONFIG_SPARC)
#include "i8042-sparcio.h"
-#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
-#include "i8042-x86ia64io.h"
+#elif defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
+#include "i8042-acpipnpio.h"
#else
#include "i8042-io.h"
#endif
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 250e213cc80c..3e19344eda93 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/kmsan-checks.h>
#include <linux/serio.h>
#include <linux/i8042.h>
#include <linux/libps2.h>
@@ -294,9 +295,11 @@ int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command)
serio_pause_rx(ps2dev->serio);
- if (param)
+ if (param) {
for (i = 0; i < receive; i++)
param[i] = ps2dev->cmdbuf[(receive - 1) - i];
+ kmsan_unpoison_memory(param, receive);
+ }
if (ps2dev->cmdcnt &&
(command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) {
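
The kmsan_unpoison_memory() call added above silences a KMSAN false positive: the command buffer is filled from the PS/2 interrupt handler, outside the instrumented call chain of __ps2_command(), so KMSAN would otherwise flag the copied-out bytes as uninitialized. A sketch of the annotation's use, with the buffer handling purely illustrative:

#include <linux/kmsan-checks.h>
#include <linux/types.h>

/*
 * Data produced in an ISR is invisible to KMSAN's normal initialization
 * tracking, so after copying it out we mark the destination initialized.
 * This compiles to a no-op unless CONFIG_KMSAN is enabled.
 */
static void copy_isr_reply(u8 *dst, const u8 *isr_buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = isr_buf[i];

	kmsan_unpoison_memory(dst, len);
}
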
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index 59de8d9b6710..04d2db982fb8 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -199,8 +199,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
kb_serio->close = olpc_apsp_close;
kb_serio->port_data = priv;
kb_serio->dev.parent = &pdev->dev;
- strlcpy(kb_serio->name, "sp keyboard", sizeof(kb_serio->name));
- strlcpy(kb_serio->phys, "sp/serio0", sizeof(kb_serio->phys));
+ strscpy(kb_serio->name, "sp keyboard", sizeof(kb_serio->name));
+ strscpy(kb_serio->phys, "sp/serio0", sizeof(kb_serio->phys));
priv->kbio = kb_serio;
serio_register_port(kb_serio);
@@ -216,8 +216,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
pad_serio->close = olpc_apsp_close;
pad_serio->port_data = priv;
pad_serio->dev.parent = &pdev->dev;
- strlcpy(pad_serio->name, "sp touchpad", sizeof(pad_serio->name));
- strlcpy(pad_serio->phys, "sp/serio1", sizeof(pad_serio->phys));
+ strscpy(pad_serio->name, "sp touchpad", sizeof(pad_serio->name));
+ strscpy(pad_serio->phys, "sp/serio1", sizeof(pad_serio->phys));
priv->padio = pad_serio;
serio_register_port(pad_serio);
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 51b68501896c..0d54895428f5 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -169,7 +169,7 @@ static struct serio *parkbd_allocate_serio(void)
if (serio) {
serio->id.type = parkbd_mode;
serio->write = parkbd_write;
- strlcpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name));
+ strscpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", parkbd_dev->port->name);
}
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index bedf75de0a2c..05878750f2c2 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -149,8 +149,8 @@ static int pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
serio->write = pcips2_write;
serio->open = pcips2_open;
serio->close = pcips2_close;
- strlcpy(serio->name, pci_name(dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
+ strscpy(serio->name, pci_name(dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &dev->dev;
ps2if->io = serio;
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index 9b02dd5dd2b9..bc1dc484389b 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -449,8 +449,8 @@ static int ps2_gpio_probe(struct platform_device *pdev)
serio->write = drvdata->write_enable ? ps2_gpio_write : NULL;
serio->port_data = drvdata;
serio->dev.parent = dev;
- strlcpy(serio->name, dev_name(dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(dev), sizeof(serio->phys));
drvdata->serio = serio;
drvdata->dev = dev;
diff --git a/drivers/input/serio/ps2mult.c b/drivers/input/serio/ps2mult.c
index 0071dd5ebcc2..902e81826fbf 100644
--- a/drivers/input/serio/ps2mult.c
+++ b/drivers/input/serio/ps2mult.c
@@ -131,7 +131,7 @@ static int ps2mult_create_port(struct ps2mult *psm, int i)
if (!serio)
return -ENOMEM;
- strlcpy(serio->name, "TQC PS/2 Multiplexer", sizeof(serio->name));
+ strscpy(serio->name, "TQC PS/2 Multiplexer", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys),
"%s/port%d", mx_serio->phys, i);
serio->id.type = SERIO_8042;
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index bd248398556a..ba04058fc3cb 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -10,9 +10,6 @@
* Q40 PS/2 keyboard controller driver for Linux/m68k
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
@@ -126,8 +123,8 @@ static int q40kbd_probe(struct platform_device *pdev)
port->close = q40kbd_close;
port->port_data = q40kbd;
port->dev.parent = &pdev->dev;
- strlcpy(port->name, "Q40 Kbd Port", sizeof(port->name));
- strlcpy(port->phys, "Q40", sizeof(port->phys));
+ strscpy(port->name, "Q40 Kbd Port", sizeof(port->name));
+ strscpy(port->phys, "Q40", sizeof(port->phys));
q40kbd_stop();
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 37fe6a5711ea..ce420eb1f51b 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -8,9 +8,6 @@
* Acorn RiscPC PS/2 keyboard controller driver for Linux/ARM
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/serio.h>
@@ -128,8 +125,8 @@ static int rpckbd_probe(struct platform_device *dev)
serio->close = rpckbd_close;
serio->dev.parent = &dev->dev;
serio->port_data = rpckbd;
- strlcpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
- strlcpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));
+ strscpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
+ strscpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));
platform_set_drvdata(dev, serio);
serio_register_port(serio);
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 68fac4801e2e..2724c3aa512c 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -267,8 +267,8 @@ static int ps2_probe(struct sa1111_dev *dev)
serio->write = ps2_write;
serio->open = ps2_open;
serio->close = ps2_close;
- strlcpy(serio->name, dev_name(&dev->dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(&dev->dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &dev->dev;
ps2if->io = serio;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index ec117be3d8d8..15ce3202322f 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -7,9 +7,6 @@
* Copyright (c) 2003 Daniele Bellucci
*/
-/*
- */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/stddef.h>
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 669a728095b8..7f7ef0e3a749 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -171,7 +171,7 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file,
if (!serio)
return -ENOMEM;
- strlcpy(serio->name, "Serial port", sizeof(serio->name));
+ strscpy(serio->name, "Serial port", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", tty_name(tty));
serio->id = serport->id;
serio->id.type = SERIO_RS232;
diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
index f15ed3dcdb9b..eb262640192e 100644
--- a/drivers/input/serio/sun4i-ps2.c
+++ b/drivers/input/serio/sun4i-ps2.c
@@ -256,8 +256,8 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
serio->close = sun4i_ps2_close;
serio->port_data = drvdata;
serio->dev.parent = dev;
- strlcpy(serio->name, dev_name(dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(dev), sizeof(serio->phys));
/* shutoff interrupt */
writel(0, drvdata->reg_base + PS2_REG_GCTL);
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 56c7e471ac32..b20e5a1afbcc 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -9,9 +9,6 @@
* v3.2 - Added sysfs support
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -155,7 +152,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
acecad->input = input_dev;
if (dev->manufacturer)
- strlcpy(acecad->name, dev->manufacturer, sizeof(acecad->name));
+ strscpy(acecad->name, dev->manufacturer, sizeof(acecad->name));
if (dev->product) {
if (dev->manufacturer)
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 24ec4844a5c3..baabc51547b8 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1617,7 +1617,7 @@ static ssize_t show_firmwareCode(struct device *dev, struct device_attribute *at
static DEVICE_ATTR(firmware_code, S_IRUGO, show_firmwareCode, NULL);
-static struct attribute *aiptek_attributes[] = {
+static struct attribute *aiptek_dev_attrs[] = {
&dev_attr_size.attr,
&dev_attr_pointer_mode.attr,
&dev_attr_coordinate_mode.attr,
@@ -1641,9 +1641,7 @@ static struct attribute *aiptek_attributes[] = {
NULL
};
-static const struct attribute_group aiptek_attribute_group = {
- .attrs = aiptek_attributes,
-};
+ATTRIBUTE_GROUPS(aiptek_dev);
/***********************************************************************
* This routine is called when a tablet has been identified. It basically
@@ -1842,26 +1840,16 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
*/
usb_set_intfdata(intf, aiptek);
- /* Set up the sysfs files
- */
- err = sysfs_create_group(&intf->dev.kobj, &aiptek_attribute_group);
- if (err) {
- dev_warn(&intf->dev, "cannot create sysfs group err: %d\n",
- err);
- goto fail3;
- }
-
/* Register the tablet as an Input Device
*/
err = input_register_device(aiptek->inputdev);
if (err) {
dev_warn(&intf->dev,
"input_register_device returned err: %d\n", err);
- goto fail4;
+ goto fail3;
}
return 0;
- fail4: sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
fail3: usb_free_urb(aiptek->urb);
fail2: usb_free_coherent(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
aiptek->data_dma);
@@ -1886,7 +1874,6 @@ static void aiptek_disconnect(struct usb_interface *intf)
*/
usb_kill_urb(aiptek->urb);
input_unregister_device(aiptek->inputdev);
- sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
usb_free_urb(aiptek->urb);
usb_free_coherent(interface_to_usbdev(intf),
AIPTEK_PACKET_LENGTH,
@@ -1900,6 +1887,7 @@ static struct usb_driver aiptek_driver = {
.probe = aiptek_probe,
.disconnect = aiptek_disconnect,
.id_table = aiptek_ids,
+ .dev_groups = aiptek_dev_groups,
};
module_usb_driver(aiptek_driver);
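
The aiptek conversion above hands the sysfs attributes to driver core: ATTRIBUTE_GROUPS(aiptek_dev) expands aiptek_dev_attrs into aiptek_dev_groups, and assigning that to .dev_groups makes the core create and remove the group automatically, which is why the manual sysfs_create_group()/sysfs_remove_group() calls and the fail4 error path could be deleted. A self-contained sketch of the pattern (attribute name and value are illustrative):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* illustrative value */
}
static DEVICE_ATTR_RO(example);

static struct attribute *demo_dev_attrs[] = {
	&dev_attr_example.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo_dev);	/* emits demo_dev_groups for .dev_groups */
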
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 6d58443bb3e9..9bc631518b92 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -5,9 +5,6 @@
* Copyright (c) 2010 Xing Wei <weixing@hanwang.com.cn>
*/
-/*
- */
-
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -356,7 +353,7 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
usb_make_path(dev, hanwang->phys, sizeof(hanwang->phys));
strlcat(hanwang->phys, "/input0", sizeof(hanwang->phys));
- strlcpy(hanwang->name, hanwang->features->name, sizeof(hanwang->name));
+ strscpy(hanwang->name, hanwang->features->name, sizeof(hanwang->name));
input_dev->name = hanwang->name;
input_dev->phys = hanwang->phys;
usb_to_input_id(dev, &input_dev->id);
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index c608ac505d1b..d836d3dcc6a2 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -319,7 +319,7 @@ static int pegasus_probe(struct usb_interface *intf,
pegasus->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (dev->manufacturer)
- strlcpy(pegasus->name, dev->manufacturer,
+ strscpy(pegasus->name, dev->manufacturer,
sizeof(pegasus->name));
if (dev->product) {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 2d70c945b20a..dc90a3ea51ee 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1335,7 +1335,7 @@ config TOUCHSCREEN_ZFORCE
config TOUCHSCREEN_COLIBRI_VF50
tristate "Toradex Colibri on board touchscreen driver"
- depends on IIO && VF610_ADC
+ depends on IIO
depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a Colibri VF50 and plan to use
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 4eedea08b0b5..ccecd1441f0b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2497,8 +2497,8 @@ static int mxt_vidioc_querycap(struct file *file, void *priv,
{
struct mxt_data *data = video_drvdata(file);
- strlcpy(cap->driver, "atmel_mxt_ts", sizeof(cap->driver));
- strlcpy(cap->card, "atmel_mxt_ts touch", sizeof(cap->card));
+ strscpy(cap->driver, "atmel_mxt_ts", sizeof(cap->driver));
+ strscpy(cap->card, "atmel_mxt_ts touch", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"I2C:%s", dev_name(&data->client->dev));
return 0;
@@ -2514,11 +2514,11 @@ static int mxt_vidioc_enum_input(struct file *file, void *priv,
switch (i->index) {
case MXT_V4L_INPUT_REFS:
- strlcpy(i->name, "Mutual Capacitance References",
+ strscpy(i->name, "Mutual Capacitance References",
sizeof(i->name));
break;
case MXT_V4L_INPUT_DELTAS:
- strlcpy(i->name, "Mutual Capacitance Deltas", sizeof(i->name));
+ strscpy(i->name, "Mutual Capacitance Deltas", sizeof(i->name));
break;
}
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index c33e63ca6142..2deae5a6823a 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -10,6 +10,7 @@
* Copyright (c) 2008 QUALCOMM USA, INC.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -19,10 +20,9 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/input/auo-pixcir-ts.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
/*
* Coordinate calculation:
@@ -69,6 +69,16 @@
#define AUO_PIXCIR_INT_RELEASE (1 << 4)
#define AUO_PIXCIR_INT_ENABLE (1 << 3)
#define AUO_PIXCIR_INT_POL_HIGH (1 << 2)
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * compare coordinates: interrupt is asserted when coordinates change
+ * indicate touch: interrupt is asserted during touch
+ */
+#define AUO_PIXCIR_INT_PERIODICAL 0x00
+#define AUO_PIXCIR_INT_COMP_COORD 0x01
+#define AUO_PIXCIR_INT_TOUCH_IND 0x02
#define AUO_PIXCIR_INT_MODE_MASK 0x03
/*
@@ -103,10 +113,14 @@
struct auo_pixcir_ts {
struct i2c_client *client;
struct input_dev *input;
- const struct auo_pixcir_ts_platdata *pdata;
+ struct gpio_desc *gpio_int;
+ struct gpio_desc *gpio_rst;
char phys[32];
- /* special handling for touch_indicate interupt mode */
+ unsigned int x_max;
+ unsigned int y_max;
+
+ /* special handling for touch_indicate interrupt mode */
bool touch_ind_mode;
wait_queue_head_t wait;
@@ -125,7 +139,6 @@ static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
struct auo_point_t *point)
{
struct i2c_client *client = ts->client;
- const struct auo_pixcir_ts_platdata *pdata = ts->pdata;
uint8_t raw_coord[8];
uint8_t raw_area[4];
int i, ret;
@@ -152,8 +165,8 @@ static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
point[i].coord_y =
raw_coord[4 * i + 3] << 8 | raw_coord[4 * i + 2];
- if (point[i].coord_x > pdata->x_max ||
- point[i].coord_y > pdata->y_max) {
+ if (point[i].coord_x > ts->x_max ||
+ point[i].coord_y > ts->y_max) {
dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
point[i].coord_x, point[i].coord_y);
point[i].coord_x = point[i].coord_y = 0;
@@ -171,7 +184,6 @@ static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
{
struct auo_pixcir_ts *ts = dev_id;
- const struct auo_pixcir_ts_platdata *pdata = ts->pdata;
struct auo_point_t point[AUO_PIXCIR_REPORT_POINTS];
int i;
int ret;
@@ -182,7 +194,7 @@ static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
/* check for up event in touch touch_ind_mode */
if (ts->touch_ind_mode) {
- if (gpio_get_value(pdata->gpio_int) == 0) {
+ if (gpiod_get_value_cansleep(ts->gpio_int) == 0) {
input_mt_sync(ts->input);
input_report_key(ts->input, BTN_TOUCH, 0);
input_sync(ts->input);
@@ -278,11 +290,9 @@ static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
return 0;
}
-static int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
- int int_setting)
+static int auo_pixcir_int_config(struct auo_pixcir_ts *ts, int int_setting)
{
struct i2c_client *client = ts->client;
- const struct auo_pixcir_ts_platdata *pdata = ts->pdata;
int ret;
ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
@@ -304,7 +314,7 @@ static int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
return ret;
}
- ts->touch_ind_mode = pdata->int_setting == AUO_PIXCIR_INT_TOUCH_IND;
+ ts->touch_ind_mode = int_setting == AUO_PIXCIR_INT_TOUCH_IND;
return 0;
}
@@ -465,78 +475,22 @@ unlock:
static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops,
auo_pixcir_suspend, auo_pixcir_resume);
-#ifdef CONFIG_OF
-static struct auo_pixcir_ts_platdata *auo_pixcir_parse_dt(struct device *dev)
-{
- struct auo_pixcir_ts_platdata *pdata;
- struct device_node *np = dev->of_node;
-
- if (!np)
- return ERR_PTR(-ENOENT);
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->gpio_int = of_get_gpio(np, 0);
- if (!gpio_is_valid(pdata->gpio_int)) {
- dev_err(dev, "failed to get interrupt gpio\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata->gpio_rst = of_get_gpio(np, 1);
- if (!gpio_is_valid(pdata->gpio_rst)) {
- dev_err(dev, "failed to get reset gpio\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
- dev_err(dev, "failed to get x-size property\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(np, "y-size", &pdata->y_max)) {
- dev_err(dev, "failed to get y-size property\n");
- return ERR_PTR(-EINVAL);
- }
-
- /* default to asserting the interrupt when the screen is touched */
- pdata->int_setting = AUO_PIXCIR_INT_TOUCH_IND;
-
- return pdata;
-}
-#else
-static struct auo_pixcir_ts_platdata *auo_pixcir_parse_dt(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
-
static void auo_pixcir_reset(void *data)
{
struct auo_pixcir_ts *ts = data;
- gpio_set_value(ts->pdata->gpio_rst, 0);
+ gpiod_set_value_cansleep(ts->gpio_rst, 1);
}
static int auo_pixcir_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct auo_pixcir_ts_platdata *pdata;
struct auo_pixcir_ts *ts;
struct input_dev *input_dev;
int version;
int error;
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- pdata = auo_pixcir_parse_dt(&client->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
-
- ts = devm_kzalloc(&client->dev,
- sizeof(struct auo_pixcir_ts), GFP_KERNEL);
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
@@ -546,7 +500,6 @@ static int auo_pixcir_probe(struct i2c_client *client,
return -ENOMEM;
}
- ts->pdata = pdata;
ts->client = client;
ts->input = input_dev;
ts->touch_ind_mode = 0;
@@ -556,6 +509,16 @@ static int auo_pixcir_probe(struct i2c_client *client,
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
+ if (device_property_read_u32(&client->dev, "x-size", &ts->x_max)) {
+ dev_err(&client->dev, "failed to get x-size property\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&client->dev, "y-size", &ts->y_max)) {
+ dev_err(&client->dev, "failed to get y-size property\n");
+ return -EINVAL;
+ }
+
input_dev->name = "AUO-Pixcir touchscreen";
input_dev->phys = ts->phys;
input_dev->id.bustype = BUS_I2C;
@@ -569,39 +532,42 @@ static int auo_pixcir_probe(struct i2c_client *client,
__set_bit(BTN_TOUCH, input_dev->keybit);
/* For single touch */
- input_set_abs_params(input_dev, ABS_X, 0, pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, pdata->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, ts->y_max, 0, 0);
/* For multi touch */
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
- pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
- pdata->y_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
- AUO_PIXCIR_MAX_AREA, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
- AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
+ 0, AUO_PIXCIR_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
input_set_drvdata(ts->input, ts);
- error = devm_gpio_request_one(&client->dev, pdata->gpio_int,
- GPIOF_DIR_IN, "auo_pixcir_ts_int");
+ ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN);
+ error = PTR_ERR_OR_ZERO(ts->gpio_int);
if (error) {
- dev_err(&client->dev, "request of gpio %d failed, %d\n",
- pdata->gpio_int, error);
+ dev_err(&client->dev,
+ "request of int gpio failed: %d\n", error);
return error;
}
- error = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
- GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
- "auo_pixcir_ts_rst");
+ gpiod_set_consumer_name(ts->gpio_int, "auo_pixcir_ts_int");
+
+ /* Take the chip out of reset */
+ ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
+ GPIOD_OUT_LOW);
+ error = PTR_ERR_OR_ZERO(ts->gpio_rst);
if (error) {
- dev_err(&client->dev, "request of gpio %d failed, %d\n",
- pdata->gpio_rst, error);
+ dev_err(&client->dev,
+ "request of reset gpio failed: %d\n", error);
return error;
}
+ gpiod_set_consumer_name(ts->gpio_rst, "auo_pixcir_ts_rst");
+
error = devm_add_action_or_reset(&client->dev, auo_pixcir_reset, ts);
if (error) {
dev_err(&client->dev, "failed to register reset action, %d\n",
@@ -619,13 +585,14 @@ static int auo_pixcir_probe(struct i2c_client *client,
dev_info(&client->dev, "firmware version 0x%X\n", version);
- error = auo_pixcir_int_config(ts, pdata->int_setting);
+ /* default to asserting the interrupt when the screen is touched */
+ error = auo_pixcir_int_config(ts, AUO_PIXCIR_INT_TOUCH_IND);
if (error)
return error;
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, auo_pixcir_interrupt,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ IRQF_ONESHOT,
input_dev->name, ts);
if (error) {
dev_err(&client->dev, "irq %d requested failed, %d\n",
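
The auo-pixcir rework above is a platform-data removal plus gpiolib modernization: GPIOs are looked up by index (a NULL con_id matches the bare "gpios" firmware property), line polarity is handled by the descriptor, and GPIOD_OUT_LOW requests the reset line logically inactive, which is why the probe comment says it takes the chip out of reset while auo_pixcir_reset() asserts it with gpiod_set_value_cansleep(..., 1). Hedged sketch of the request pattern (names illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_get_gpios(struct device *dev)
{
	struct gpio_desc *gpio_int, *gpio_rst;

	gpio_int = devm_gpiod_get_index(dev, NULL, 0, GPIOD_IN);
	if (IS_ERR(gpio_int))
		return PTR_ERR(gpio_int);

	/* Logically inactive: deasserted regardless of active-low wiring */
	gpio_rst = devm_gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
	if (IS_ERR(gpio_rst))
		return PTR_ERR(gpio_rst);

	gpiod_set_consumer_name(gpio_rst, "demo-rst");

	return 0;
}
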
diff --git a/drivers/input/touchscreen/chipone_icn8505.c b/drivers/input/touchscreen/chipone_icn8505.c
index f9ca5502ac8c..c421f4be2700 100644
--- a/drivers/input/touchscreen/chipone_icn8505.c
+++ b/drivers/input/touchscreen/chipone_icn8505.c
@@ -364,32 +364,20 @@ static irqreturn_t icn8505_irq(int irq, void *dev_id)
static int icn8505_probe_acpi(struct icn8505_data *icn8505, struct device *dev)
{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- const char *subsys = "unknown";
- struct acpi_device *adev;
- union acpi_object *obj;
- acpi_status status;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
+ const char *subsys;
+ int error;
- status = acpi_evaluate_object(adev->handle, "_SUB", NULL, &buffer);
- if (ACPI_SUCCESS(status)) {
- obj = buffer.pointer;
- if (obj->type == ACPI_TYPE_STRING)
- subsys = obj->string.pointer;
- else
- dev_warn(dev, "Warning ACPI _SUB did not return a string\n");
- } else {
- dev_warn(dev, "Warning ACPI _SUB failed: %#x\n", status);
- buffer.pointer = NULL;
- }
+ subsys = acpi_get_subsystem_id(ACPI_HANDLE(dev));
+ error = PTR_ERR_OR_ZERO(subsys);
+ if (error == -ENODATA)
+ subsys = "unknown";
+ else if (error)
+ return error;
snprintf(icn8505->firmware_name, sizeof(icn8505->firmware_name),
"chipone/icn8505-%s.fw", subsys);
- kfree(buffer.pointer);
+ kfree_const(subsys);
return 0;
}
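
The icn8505 change delegates the _SUB evaluation to acpi_get_subsystem_id(), whose contract the new error handling mirrors: on success it returns a string to be released with kfree_const(), and ERR_PTR(-ENODATA) specifically means the ACPI _SUB object is absent, which this driver maps to a default name rather than a failure. Hedged sketch (helper name illustrative):

#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Fetch _SUB, falling back to a default when the object is missing. */
static const char *demo_subsystem_or_default(struct device *dev)
{
	const char *subsys = acpi_get_subsystem_id(ACPI_HANDLE(dev));

	if (IS_ERR(subsys) && PTR_ERR(subsys) == -ENODATA)
		return kstrdup_const("unknown", GFP_KERNEL);

	return subsys;	/* valid string, or another ERR_PTR for the caller */
}
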
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 5fb441387fe5..9ac1378610bc 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -912,8 +912,8 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
p = strchr(rdbuf, '*');
if (p)
*p++ = '\0';
- strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
- strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ strscpy(model_name, rdbuf + 1, EDT_NAME_LEN);
+ strscpy(fw_version, p ? p : "", EDT_NAME_LEN);
} else if (!strncasecmp(rdbuf, "EP0", 3)) {
tsdata->version = EDT_M12;
@@ -926,8 +926,8 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
p = strchr(rdbuf, '*');
if (p)
*p++ = '\0';
- strlcpy(model_name, rdbuf, EDT_NAME_LEN);
- strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ strscpy(model_name, rdbuf, EDT_NAME_LEN);
+ strscpy(fw_version, p ? p : "", EDT_NAME_LEN);
} else {
/* If it is not an EDT M06/M12 touchscreen, then the model
* detection is a bit hairy. The different ft5x06
@@ -945,7 +945,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
if (error)
return error;
- strlcpy(fw_version, rdbuf, 2);
+ strscpy(fw_version, rdbuf, 2);
error = edt_ft5x06_ts_readwrite(client, 1, "\xA8",
1, rdbuf);
@@ -981,7 +981,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
1, rdbuf);
if (error)
return error;
- strlcpy(fw_version, rdbuf, 1);
+ strscpy(fw_version, rdbuf, 1);
snprintf(model_name, EDT_NAME_LEN,
"EVERVISION-FT5726NEi");
break;
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index e07e8e0fe8ea..5a5f9da73fa1 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -7,9 +7,6 @@
* Gunze AHL-51S touchscreen driver for Linux
*/
-/*
- */
-
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 12f2562b0141..8ddb3f7d307a 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -939,8 +939,8 @@ static int sur40_vidioc_querycap(struct file *file, void *priv,
{
struct sur40_state *sur40 = video_drvdata(file);
- strlcpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
- strlcpy(cap->card, DRIVER_LONG, sizeof(cap->card));
+ strscpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
+ strscpy(cap->card, DRIVER_LONG, sizeof(cap->card));
usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info));
return 0;
}
@@ -952,7 +952,7 @@ static int sur40_vidioc_enum_input(struct file *file, void *priv,
return -EINVAL;
i->type = V4L2_INPUT_TYPE_TOUCH;
i->std = V4L2_STD_UNKNOWN;
- strlcpy(i->name, "In-Cell Sensor", sizeof(i->name));
+ strscpy(i->name, "In-Cell Sensor", sizeof(i->name));
i->capabilities = 0;
return 0;
}
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 3dda6eaabdab..d6d04b9f04fc 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1708,7 +1708,7 @@ static int usbtouch_probe(struct usb_interface *intf,
usbtouch->input = input_dev;
if (udev->manufacturer)
- strlcpy(usbtouch->name, udev->manufacturer, sizeof(usbtouch->name));
+ strscpy(usbtouch->name, udev->manufacturer, sizeof(usbtouch->name));
if (udev->product) {
if (udev->manufacturer)
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 691285ace228..928c5ee3ac36 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -625,7 +625,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
/* For backwards-compatibility we compose the basename based on
* capabilities and then just append the tool type
*/
- strlcpy(basename, "Wacom Serial", sizeof(basename));
+ strscpy(basename, "Wacom Serial", sizeof(basename));
err_pen = w8001_setup_pen(w8001, basename, sizeof(basename));
err_touch = w8001_setup_touch(w8001, basename, sizeof(basename));
@@ -635,7 +635,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
}
if (!err_pen) {
- strlcpy(w8001->pen_name, basename, sizeof(w8001->pen_name));
+ strscpy(w8001->pen_name, basename, sizeof(w8001->pen_name));
strlcat(w8001->pen_name, " Pen", sizeof(w8001->pen_name));
input_dev_pen->name = w8001->pen_name;
@@ -651,7 +651,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
}
if (!err_touch) {
- strlcpy(w8001->touch_name, basename, sizeof(w8001->touch_name));
+ strscpy(w8001->touch_name, basename, sizeof(w8001->touch_name));
strlcat(w8001->touch_name, " Finger",
sizeof(w8001->touch_name));
input_dev_touch->name = w8001->touch_name;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5c5cb5bee8b6..dc5f7a156ff5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -67,6 +67,17 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
If unsure, say N here.
+config IOMMU_IO_PGTABLE_DART
+ bool "Apple DART Formats"
+ select IOMMU_IO_PGTABLE
+ depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ help
+ Enable support for the Apple DART pagetable formats. These include
+ the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family
+ SoCs.
+
+ If unsure, say N here.
+
endmenu
config IOMMU_DEBUGFS
@@ -137,7 +148,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
- bool
+ def_bool ARM64 || IA64 || X86
select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
@@ -294,7 +305,7 @@ config APPLE_DART
tristate "Apple DART IOMMU Support"
depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_IO_PGTABLE_DART
default ARCH_APPLE
help
Support for Apple DART (Device Address Resolution Table) IOMMUs
@@ -476,7 +487,6 @@ config VIRTIO_IOMMU
depends on VIRTIO
depends on (ARM64 || X86)
select IOMMU_API
- select IOMMU_DMA
select INTERVAL_TREE
select ACPI_VIOT if ACPI
help
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 44475a9b3eea..cc9f381013c3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o
obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index a3cbafb603f5..9b5fc3356bf2 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -9,7 +9,6 @@ config AMD_IOMMU
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
- select IOMMU_DMA
select IOMMU_IO_PGTABLE
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index a935f8f4b974..773d8aa00283 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 84e5bb1bf01b..c160a332ce33 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -18,7 +18,6 @@ extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5b1019dab328..1d0a70c85333 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -94,6 +94,7 @@
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
+#define FEATURE_GIOSUP (1ULL<<48)
#define FEATURE_EPHSUP (1ULL<<50)
#define FEATURE_SNP (1ULL<<63)
@@ -276,6 +277,8 @@
* 512GB Pages are not supported due to a hardware bug
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+/* 4K, 2MB, 1G page sizes are supported */
+#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
/* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
@@ -376,6 +379,7 @@
#define DTE_FLAG_IW (1ULL << 62)
#define DTE_FLAG_IOTLB (1ULL << 32)
+#define DTE_FLAG_GIOV (1ULL << 54)
#define DTE_FLAG_GV (1ULL << 55)
#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DTE_GLX_SHIFT (56)
@@ -434,6 +438,7 @@
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
translation */
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
+#define PD_GIOV_MASK (1UL << 4) /* domain enable GIOV support */
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
@@ -456,6 +461,8 @@ struct irq_remap_table {
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
+extern const struct iommu_ops amd_iommu_ops;
+
/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;
@@ -526,7 +533,8 @@ struct amd_io_pgtable {
struct io_pgtable iop;
int mode;
u64 *root;
- atomic64_t pt_root; /* pgtable root and pgtable mode */
+ atomic64_t pt_root; /* pgtable root and pgtable mode */
+ u64 *pgd; /* v2 pgtable pgd pointer */
};
/*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index fdc642362c14..1a2d425bf568 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -95,8 +95,6 @@
* out of it.
*/
-extern const struct iommu_ops amd_iommu_ops;
-
/*
* structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys.
@@ -2068,6 +2066,17 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
init_iommu_perf_ctr(iommu);
+ if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
+ !iommu_feature(iommu, FEATURE_GT)) {
+ pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (iommu_default_passthrough()) {
+ pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+ }
+
if (is_rd890_iommu(iommu->dev)) {
int i, j;
@@ -2146,6 +2155,8 @@ static void print_iommu_info(void)
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
+ if (amd_iommu_pgtable == AMD_IOMMU_V2)
+ pr_info("V2 page table enabled\n");
}
static int __init amd_iommu_init_pci(void)
@@ -2168,20 +2179,13 @@ static int __init amd_iommu_init_pci(void)
/*
* Order is important here to make sure any unity map requirements are
* fulfilled. The unity mappings are created and written to the device
- * table during the amd_iommu_init_api() call.
+ * table during the iommu_init_pci() call.
*
* After that we call init_device_table_dma() to make sure any
* uninitialized DTE will block DMA, and in the end we flush the caches
* of all IOMMUs to make sure the changes to the device table are
* active.
*/
- ret = amd_iommu_init_api();
- if (ret) {
- pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
- ret);
- goto out;
- }
-
for_each_pci_segment(pci_seg)
init_device_table_dma(pci_seg);
@@ -3366,17 +3370,30 @@ static int __init parse_amd_iommu_intr(char *str)
static int __init parse_amd_iommu_options(char *str)
{
- for (; *str; ++str) {
+ if (!str)
+ return -EINVAL;
+
+ while (*str) {
if (strncmp(str, "fullflush", 9) == 0) {
pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
iommu_set_dma_strict();
- }
- if (strncmp(str, "force_enable", 12) == 0)
+ } else if (strncmp(str, "force_enable", 12) == 0) {
amd_iommu_force_enable = true;
- if (strncmp(str, "off", 3) == 0)
+ } else if (strncmp(str, "off", 3) == 0) {
amd_iommu_disabled = true;
- if (strncmp(str, "force_isolation", 15) == 0)
+ } else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true;
+ } else if (strncmp(str, "pgtbl_v1", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V2;
+ } else {
+ pr_notice("Unknown option - '%s'\n", str);
+ }
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
}
return 1;
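
The reworked parse_amd_iommu_options() turns a character-by-character scan into a proper token walk: strcspn() measures the current option up to the next comma, and the trailing loop swallows comma runs, so empty tokens are skipped and unrecognized ones can be reported individually. Hedged sketch of the same walk; the real parser matches several options and sets globals:

#include <linux/printk.h>
#include <linux/string.h>

static void walk_options(char *str)
{
	while (*str) {
		size_t len = strcspn(str, ",");	/* token length */

		if (len == 8 && strncmp(str, "pgtbl_v2", 8) == 0)
			pr_info("v2 page table selected\n");
		else if (len)
			pr_notice("Unknown option - '%.*s'\n", (int)len, str);

		str += len;		/* jump to the delimiter */
		while (*str == ',')	/* skip any run of commas */
			str++;
	}
}
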
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 7d4b61e5db47..ace0e9b8b913 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
* supporting all features of AMD IOMMU page tables like level skipping
* and full 64 bit address spaces.
*/
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
LIST_HEAD(freelist);
@@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
u64 __pte, *pte;
int ret, i, count;
- BUG_ON(!IS_ALIGNED(iova, size));
- BUG_ON(!IS_ALIGNED(paddr, size));
+ BUG_ON(!IS_ALIGNED(iova, pgsize));
+ BUG_ON(!IS_ALIGNED(paddr, pgsize));
ret = -EINVAL;
if (!(prot & IOMMU_PROT_MASK))
goto out;
- count = PAGE_SIZE_PTE_COUNT(size);
- pte = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+ while (pgcount > 0) {
+ count = PAGE_SIZE_PTE_COUNT(pgsize);
+ pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
- ret = -ENOMEM;
- if (!pte)
- goto out;
+ ret = -ENOMEM;
+ if (!pte)
+ goto out;
- for (i = 0; i < count; ++i)
- free_clear_pte(&pte[i], pte[i], &freelist);
+ for (i = 0; i < count; ++i)
+ free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
- updated = true;
+ if (!list_empty(&freelist))
+ updated = true;
- if (count > 1) {
- __pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- } else
- __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ if (count > 1) {
+ __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ } else
+ __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ iova += pgsize;
+ paddr += pgsize;
+ pgcount--;
+ if (mapped)
+ *mapped += pgsize;
+ }
ret = 0;
@@ -426,17 +435,18 @@ out:
return ret;
}
-static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
- unsigned long iova,
- size_t size,
- struct iommu_iotlb_gather *gather)
+static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long long unmapped;
unsigned long unmap_size;
u64 *pte;
+ size_t size = pgcount << __ffs(pgsize);
- BUG_ON(!is_power_of_2(size));
+ BUG_ON(!is_power_of_2(pgsize));
unmapped = 0;
@@ -448,14 +458,14 @@ static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
+ } else {
+ return unmapped;
}
iova = (iova & ~(unmap_size - 1)) + unmap_size;
unmapped += unmap_size;
}
- BUG_ON(unmapped && !is_power_of_2(unmapped));
-
return unmapped;
}
@@ -514,8 +524,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v1_flush_ops;
- pgtable->iop.ops.map = iommu_v1_map_page;
- pgtable->iop.ops.unmap = iommu_v1_unmap_page;
+ pgtable->iop.ops.map_pages = iommu_v1_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
return &pgtable->iop;
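
The v1 conversion above moves the driver to the batched io-pgtable callbacks: ->map_pages() receives a run of pgcount equally sized pages (pgsize each) and must advance *mapped as it goes, so on a partial failure the core knows exactly how much to unwind, while ->unmap_pages() returns the number of bytes actually unmapped. A skeleton of that calling convention under those assumptions (the body is illustrative):

#include <linux/io-pgtable.h>

static int demo_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			  phys_addr_t paddr, size_t pgsize, size_t pgcount,
			  int prot, gfp_t gfp, size_t *mapped)
{
	while (pgcount--) {
		/*
		 * Install one PTE covering [iova, iova + pgsize) here; on
		 * failure, return the error with *mapped reflecting the
		 * pages already installed so the caller can roll back.
		 */
		iova += pgsize;
		paddr += pgsize;
		if (mapped)
			*mapped += pgsize;
	}

	return 0;
}
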
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
new file mode 100644
index 000000000000..8638ddf6fb3b
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table v2 allocator.
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ * Author: Vasant Hegde <vasant.hegde@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+#define IOMMU_PAGE_PRESENT BIT_ULL(0) /* Is present */
+#define IOMMU_PAGE_RW BIT_ULL(1) /* Writeable */
+#define IOMMU_PAGE_USER BIT_ULL(2) /* Userspace addressable */
+#define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
+#define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
+#define IOMMU_PAGE_ACCESS BIT_ULL(5) /* Was accessed (updated by IOMMU) */
+#define IOMMU_PAGE_DIRTY BIT_ULL(6) /* Was written to (updated by IOMMU) */
+#define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
+#define IOMMU_PAGE_NX BIT_ULL(63) /* No execute */
+
+#define MAX_PTRS_PER_PAGE 512
+
+#define IOMMU_PAGE_SIZE_2M BIT_ULL(21)
+#define IOMMU_PAGE_SIZE_1G BIT_ULL(30)
+
+
+static inline int get_pgtable_level(void)
+{
+ /* 5 level page table is not supported */
+ return PAGE_MODE_4_LEVEL;
+}
+
+static inline bool is_large_pte(u64 pte)
+{
+ return (pte & IOMMU_PAGE_PSE);
+}
+
+static inline void *alloc_pgtable_page(void)
+{
+ return (void *)get_zeroed_page(GFP_KERNEL);
+}
+
+static inline u64 set_pgtable_attr(u64 *page)
+{
+ u64 prot;
+
+ prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
+ prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ return (iommu_virt_to_phys(page) | prot);
+}
+
+static inline void *get_pgtable_pte(u64 pte)
+{
+ return iommu_phys_to_virt(pte & PM_ADDR_MASK);
+}
+
+static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
+{
+ u64 pte;
+
+ pte = __sme_set(paddr & PM_ADDR_MASK);
+ pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
+ pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ if (prot & IOMMU_PROT_IW)
+ pte |= IOMMU_PAGE_RW;
+
+ /* Large page */
+ if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
+ pte |= IOMMU_PAGE_PSE;
+
+ return pte;
+}
+
+static inline u64 get_alloc_page_size(u64 size)
+{
+ if (size >= IOMMU_PAGE_SIZE_1G)
+ return IOMMU_PAGE_SIZE_1G;
+
+ if (size >= IOMMU_PAGE_SIZE_2M)
+ return IOMMU_PAGE_SIZE_2M;
+
+ return PAGE_SIZE;
+}
+
+static inline int page_size_to_level(u64 pg_size)
+{
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ return PAGE_MODE_3_LEVEL;
+ if (pg_size == IOMMU_PAGE_SIZE_2M)
+ return PAGE_MODE_2_LEVEL;
+
+ return PAGE_MODE_1_LEVEL;
+}
+
+static inline void free_pgtable_page(u64 *pt)
+{
+ free_page((unsigned long)pt);
+}
+
+static void free_pgtable(u64 *pt, int level)
+{
+ u64 *p;
+ int i;
+
+ for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
+ /* PTE present? */
+ if (!IOMMU_PTE_PRESENT(pt[i]))
+ continue;
+
+ if (is_large_pte(pt[i]))
+ continue;
+
+ /*
+ * Free the next level. No need to look at l1 tables here since
+ * they can only contain leaf PTEs; just free them directly.
+ */
+ p = get_pgtable_pte(pt[i]);
+ if (level > 2)
+ free_pgtable(p, level - 1);
+ else
+ free_pgtable_page(p);
+ }
+
+ free_pgtable_page(pt);
+}
+
+/* Allocate page table */
+static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
+ unsigned long pg_size, bool *updated)
+{
+ u64 *pte, *page;
+ int level, end_level;
+
+ level = get_pgtable_level() - 1;
+ end_level = page_size_to_level(pg_size);
+ pte = &pgd[PM_LEVEL_INDEX(level, iova)];
+ iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
+
+ while (level >= end_level) {
+ u64 __pte, __npte;
+
+ __pte = *pte;
+
+ if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
+ /* Unmap large pte */
+ cmpxchg64(pte, *pte, 0ULL);
+ *updated = true;
+ continue;
+ }
+
+ if (!IOMMU_PTE_PRESENT(__pte)) {
+ page = alloc_pgtable_page();
+ if (!page)
+ return NULL;
+
+ __npte = set_pgtable_attr(page);
+			/* pte could have been changed by a concurrent walker. */
+ if (cmpxchg64(pte, __pte, __npte) != __pte)
+ free_pgtable_page(page);
+ else if (IOMMU_PTE_PRESENT(__pte))
+ *updated = true;
+
+ continue;
+ }
+
+ level -= 1;
+ pte = get_pgtable_pte(__pte);
+ pte = &pte[PM_LEVEL_INDEX(level, iova)];
+ }
+
+ /* Tear down existing pte entries */
+ if (IOMMU_PTE_PRESENT(*pte)) {
+ u64 *__pte;
+
+ *updated = true;
+ __pte = get_pgtable_pte(*pte);
+ cmpxchg64(pte, *pte, 0ULL);
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ free_pgtable(__pte, end_level - 1);
+ else if (pg_size == IOMMU_PAGE_SIZE_2M)
+ free_pgtable_page(__pte);
+ }
+
+ return pte;
+}
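/*
 * The walk above publishes freshly zeroed tables with cmpxchg64() and
 * backs off when a concurrent walker wins the race. A condensed sketch
 * of that idiom, assuming an empty slot reads as 0 (helper names as
 * defined above; not part of the patch):
 */
static u64 *install_table_once(u64 *slot)
{
	u64 *page = alloc_pgtable_page();
	u64 old;

	if (!page)
		return NULL;

	old = cmpxchg64(slot, 0ULL, set_pgtable_attr(page));
	if (old != 0ULL) {
		/* Lost the race: free ours, walk the winner's table. */
		free_pgtable_page(page);
		return get_pgtable_pte(old);
	}
	return page;
}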
+
+/*
+ * This function checks if there is a PTE for a given dma address.
+ * If there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ unsigned long iova, unsigned long *page_size)
+{
+ u64 *pte;
+ int level;
+
+ level = get_pgtable_level() - 1;
+ pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
+ /* Default page size is 4K */
+ *page_size = PAGE_SIZE;
+
+ while (level) {
+ /* Not present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Walk to the next level */
+ pte = get_pgtable_pte(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
+
+ /* Large page */
+ if (is_large_pte(*pte)) {
+ if (level == PAGE_MODE_3_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_1G;
+ else if (level == PAGE_MODE_2_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_2M;
+ else
+ return NULL; /* Wrongly set PSE bit in PTE */
+
+ break;
+ }
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
+ struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
+ u64 *pte;
+ unsigned long map_size;
+ unsigned long mapped_size = 0;
+ unsigned long o_iova = iova;
+ size_t size = pgcount << __ffs(pgsize);
+ int count = 0;
+ int ret = 0;
+ bool updated = false;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
+ return -EINVAL;
+
+ if (!(prot & IOMMU_PROT_MASK))
+ return -EINVAL;
+
+ while (mapped_size < size) {
+ map_size = get_alloc_page_size(pgsize);
+ pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
+ if (!pte) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *pte = set_pte_attr(paddr, map_size, prot);
+
+ count++;
+ iova += map_size;
+ paddr += map_size;
+ mapped_size += map_size;
+ }
+
+out:
+ if (updated) {
+ if (count > 1)
+ amd_iommu_flush_tlb(&pdom->domain, 0);
+ else
+ amd_iommu_flush_page(&pdom->domain, 0, o_iova);
+ }
+
+ if (mapped)
+ *mapped += mapped_size;
+
+ return ret;
+}
+
+static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
+ unsigned long unmap_size;
+ unsigned long unmapped = 0;
+ size_t size = pgcount << __ffs(pgsize);
+ u64 *pte;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
+ return 0;
+
+ while (unmapped < size) {
+ pte = fetch_pte(pgtable, iova, &unmap_size);
+ if (!pte)
+ return unmapped;
+
+ *pte = 0ULL;
+
+ iova = (iova & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ return unmapped;
+}
+
+static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long offset_mask, pte_pgsize;
+ u64 *pte, __pte;
+
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
+/*
+ * ----------------------------------------------------
+ */
+static void v2_tlb_flush_all(void *cookie)
+{
+}
+
+static void v2_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v2_flush_ops = {
+ .tlb_flush_all = v2_tlb_flush_all,
+ .tlb_flush_walk = v2_tlb_flush_walk,
+ .tlb_add_page = v2_tlb_add_page,
+};
+
+static void v2_free_pgtable(struct io_pgtable *iop)
+{
+ struct protection_domain *pdom;
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+
+ pdom = container_of(pgtable, struct protection_domain, iop);
+ if (!(pdom->flags & PD_IOMMUV2_MASK))
+ return;
+
+ /*
+	 * Make changes visible to IOMMUs. No need to clear the gcr3 entry
+	 * as the gcr3 table has already been freed.
+ */
+ amd_iommu_domain_update(pdom);
+
+ /* Free page table */
+ free_pgtable(pgtable->pgd, get_pgtable_level());
+}
+
+static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+ struct protection_domain *pdom = (struct protection_domain *)cookie;
+ int ret;
+
+ pgtable->pgd = alloc_pgtable_page();
+ if (!pgtable->pgd)
+ return NULL;
+
+ ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
+ if (ret)
+ goto err_free_pgd;
+
+ pgtable->iop.ops.map_pages = iommu_v2_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
+ pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
+
+	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+	cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
+	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
+ cfg->tlb = &v2_flush_ops;
+
+ return &pgtable->iop;
+
+err_free_pgd:
+ free_pgtable_page(pgtable->pgd);
+
+ return NULL;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
+ .alloc = v2_alloc_pgtable,
+ .free = v2_free_pgtable,
+};
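/*
 * These init_fns plug the allocator into the io-pgtable core: a domain
 * that selects the v2 format reaches v2_alloc_pgtable() through the
 * generic dispatcher, roughly as sketched below (the real call site is
 * protection_domain_alloc() in iommu.c, visible in a later hunk):
 */
static struct io_pgtable_ops *example_pick_v2(struct protection_domain *pdom)
{
	return alloc_io_pgtable_ops(AMD_IOMMU_V2, &pdom->iop.pgtbl_cfg, pdom);
}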
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 828672a46a3d..65856e401949 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -11,8 +11,6 @@
#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
-#include <linux/amba/bus.h>
-#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -20,7 +18,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
@@ -42,6 +39,7 @@
#include <asm/dma.h>
#include "amd_iommu.h"
+#include "../dma-iommu.h"
#include "../irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -66,10 +64,6 @@ LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
-/*
- * Domain for untranslated devices - only allocated
- * if iommu=pt passed on kernel cmd line.
- */
const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -85,6 +79,7 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static int domain_enable_v2(struct protection_domain *domain, int pasids);
/****************************************************************************
*
@@ -1597,6 +1592,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
flags |= tmp;
+
+ if (domain->flags & PD_GIOV_MASK)
+ pte_root |= DTE_FLAG_GIOV;
}
flags &= ~DEV_DOMID_MASK;
@@ -1650,6 +1648,10 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
+ /* Override supported page sizes */
+ if (domain->flags & PD_GIOV_MASK)
+ domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+
/* Update device table */
set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
@@ -1694,7 +1696,7 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev)
pci_disable_pasid(pdev);
}
-static int pdev_iommuv2_enable(struct pci_dev *pdev)
+static int pdev_pri_ats_enable(struct pci_dev *pdev)
{
int ret;
@@ -1757,11 +1759,19 @@ static int attach_device(struct device *dev,
struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
ret = -EINVAL;
- if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
+
+ /*
+	 * When AMD_IOMMU_V1 page table mode is in use and the device is
+	 * being enabled for PPR/ATS support (i.e. it uses the v2 table),
+	 * the domain type must be identity map.
+ */
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ def_domain->type != IOMMU_DOMAIN_IDENTITY) {
goto out;
+ }
if (dev_data->iommu_v2) {
- if (pdev_iommuv2_enable(pdev) != 0)
+ if (pdev_pri_ats_enable(pdev) != 0)
goto out;
dev_data->ats.enabled = true;
@@ -1852,6 +1862,10 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
if (!iommu)
return ERR_PTR(-ENODEV);
+ /* Not registered yet? */
+ if (!iommu->iommu.ops)
+ return ERR_PTR(-ENODEV);
+
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
@@ -1938,25 +1952,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain);
}
-int __init amd_iommu_init_api(void)
-{
- int err;
-
- err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
- if (err)
- return err;
-#ifdef CONFIG_ARM_AMBA
- err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
- if (err)
- return err;
-#endif
- err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
- if (err)
- return err;
-
- return 0;
-}
-
/*****************************************************************************
*
* The following functions belong to the exported interface of AMD IOMMU
@@ -1989,12 +1984,12 @@ static void protection_domain_free(struct protection_domain *domain)
if (!domain)
return;
- if (domain->id)
- domain_id_free(domain->id);
-
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
+ if (domain->id)
+ domain_id_free(domain->id);
+
kfree(domain);
}
@@ -2012,8 +2007,10 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
if (mode != PAGE_MODE_NONE) {
pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root)
+ if (!pt_root) {
+ domain_id_free(domain->id);
return -ENOMEM;
+ }
}
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@@ -2021,6 +2018,24 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return 0;
}
+static int protection_domain_init_v2(struct protection_domain *domain)
+{
+ spin_lock_init(&domain->lock);
+ domain->id = domain_id_alloc();
+ if (!domain->id)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&domain->dev_list);
+
+ domain->flags |= PD_GIOV_MASK;
+
+ if (domain_enable_v2(domain, 1)) {
+ domain_id_free(domain->id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
@@ -2048,6 +2063,9 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
case AMD_IOMMU_V1:
ret = protection_domain_init_v1(domain, mode);
break;
+ case AMD_IOMMU_V2:
+ ret = protection_domain_init_v2(domain);
+ break;
default:
ret = -EINVAL;
}
@@ -2056,8 +2074,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
goto out_err;
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
- if (!pgtbl_ops)
+ if (!pgtbl_ops) {
+ domain_id_free(domain->id);
goto out_err;
+ }
return domain;
out_err:
@@ -2175,13 +2195,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- if (ops->map)
+ if (ops->map_pages)
domain_flush_np_cache(domain, iova, size);
}
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot,
- gfp_t gfp)
+static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2197,8 +2217,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- if (ops->map)
- ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+ if (ops->map_pages) {
+ ret = ops->map_pages(ops, iova, paddr, pgsize,
+ pgcount, prot, gfp, mapped);
+ }
return ret;
}
@@ -2224,9 +2246,9 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
iommu_iotlb_gather_add_range(gather, iova, size);
}
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- size_t page_size,
- struct iommu_iotlb_gather *gather)
+static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2236,9 +2258,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
- r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+ r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
- amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+ if (r)
+ amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
return r;
}
@@ -2252,7 +2275,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return ops->iova_to_phys(ops, iova);
}
-static bool amd_iommu_capable(enum iommu_cap cap)
+static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -2400,8 +2423,8 @@ const struct iommu_ops amd_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map,
- .unmap = amd_iommu_unmap,
+ .map_pages = amd_iommu_map_pages,
+ .unmap_pages = amd_iommu_unmap_pages,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.iova_to_phys = amd_iommu_iova_to_phys,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
@@ -2448,11 +2471,10 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
}
EXPORT_SYMBOL(amd_iommu_domain_direct_map);
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+/* Note: This function expects iommu_domain->lock to be held prior to calling it. */
+static int domain_enable_v2(struct protection_domain *domain, int pasids)
{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int levels, ret;
+ int levels;
/* Number of GCR3 table levels required */
for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
@@ -2461,7 +2483,25 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
if (levels > amd_iommu_max_glx_val)
return -EINVAL;
- spin_lock_irqsave(&domain->lock, flags);
+ domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ return -ENOMEM;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+
+ amd_iommu_domain_update(domain);
+
+ return 0;
+}
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+ struct protection_domain *pdom = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdom->lock, flags);
/*
* Save us all sanity checks whether devices already in the
@@ -2469,24 +2509,14 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
* devices attached when it is switched into IOMMUv2 mode.
*/
ret = -EBUSY;
- if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+ if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
goto out;
- ret = -ENOMEM;
- domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
- goto out;
-
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
-
- amd_iommu_domain_update(domain);
-
- ret = 0;
+ if (!pdom->gcr3_tbl)
+ ret = domain_enable_v2(pdom, pasids);
out:
- spin_unlock_irqrestore(&domain->lock, flags);
-
+ spin_unlock_irqrestore(&pdom->lock, flags);
return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 1b1725759262..4526575b999e 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -15,7 +15,6 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dev_printk.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -33,6 +32,8 @@
#include <linux/swab.h>
#include <linux/types.h>
+#include "dma-iommu.h"
+
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2
@@ -81,10 +82,16 @@
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
+struct apple_dart_hw {
+ u32 oas;
+ enum io_pgtable_fmt fmt;
+};
+
/*
* Private structure associated with each DART device.
*
* @dev: device struct
+ * @hw: SoC-specific hardware data
* @regs: mapped MMIO region
* @irq: interrupt number, can be shared with other DARTs
* @clks: clocks associated with this DART
@@ -98,6 +105,7 @@
*/
struct apple_dart {
struct device *dev;
+ const struct apple_dart_hw *hw;
void __iomem *regs;
@@ -421,13 +429,13 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
pgtbl_cfg = (struct io_pgtable_cfg){
.pgsize_bitmap = dart->pgsize,
.ias = 32,
- .oas = 36,
+ .oas = dart->hw->oas,
.coherent_walk = 1,
.iommu_dev = dart->dev,
};
dart_domain->pgtbl_ops =
- alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain);
+ alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
if (!dart_domain->pgtbl_ops) {
ret = -ENOMEM;
goto done;
@@ -820,27 +828,6 @@ static irqreturn_t apple_dart_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int apple_dart_set_bus_ops(const struct iommu_ops *ops)
-{
- int ret;
-
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, ops);
- if (ret)
- return ret;
- }
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- ret = bus_set_iommu(&pci_bus_type, ops);
- if (ret) {
- bus_set_iommu(&platform_bus_type, NULL);
- return ret;
- }
- }
-#endif
- return 0;
-}
-
static int apple_dart_probe(struct platform_device *pdev)
{
int ret;
@@ -854,6 +841,7 @@ static int apple_dart_probe(struct platform_device *pdev)
return -ENOMEM;
dart->dev = dev;
+ dart->hw = of_device_get_match_data(dev);
spin_lock_init(&dart->lock);
dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -895,14 +883,10 @@ static int apple_dart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dart);
- ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops);
- if (ret)
- goto err_free_irq;
-
ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
dev_name(&pdev->dev));
if (ret)
- goto err_remove_bus_ops;
+ goto err_free_irq;
ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
if (ret)
@@ -916,8 +900,6 @@ static int apple_dart_probe(struct platform_device *pdev)
err_sysfs_remove:
iommu_device_sysfs_remove(&dart->iommu);
-err_remove_bus_ops:
- apple_dart_set_bus_ops(NULL);
err_free_irq:
free_irq(dart->irq, dart);
err_clk_disable:
@@ -932,7 +914,6 @@ static int apple_dart_remove(struct platform_device *pdev)
apple_dart_hw_reset(dart);
free_irq(dart->irq, dart);
- apple_dart_set_bus_ops(NULL);
iommu_device_unregister(&dart->iommu);
iommu_device_sysfs_remove(&dart->iommu);
@@ -942,8 +923,18 @@ static int apple_dart_remove(struct platform_device *pdev)
return 0;
}
+static const struct apple_dart_hw apple_dart_hw_t8103 = {
+ .oas = 36,
+ .fmt = APPLE_DART,
+};
+static const struct apple_dart_hw apple_dart_hw_t6000 = {
+ .oas = 42,
+ .fmt = APPLE_DART2,
+};
+
static const struct of_device_id apple_dart_of_match[] = {
- { .compatible = "apple,t8103-dart", .data = NULL },
+ { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+ { .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{},
};
MODULE_DEVICE_TABLE(of, apple_dart_of_match);
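/*
 * With the per-SoC parameters captured in struct apple_dart_hw, a
 * further DART revision only needs another match entry. A hedged
 * sketch; the compatible string and values below are hypothetical:
 */
static const struct apple_dart_hw apple_dart_hw_example = {
	.oas = 42,		/* output address bits of the new SoC */
	.fmt = APPLE_DART2,	/* PTE layout it implements */
};
/* ... plus: { .compatible = "apple,example-dart", .data = &apple_dart_hw_example }, */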
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 71f7edded9cf..ba47c73f5b8c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -14,7 +14,6 @@
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
@@ -28,9 +27,8 @@
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-
#include "arm-smmu-v3.h"
+#include "../../dma-iommu.h"
#include "../../iommu-sva-lib.h"
static bool disable_bypass = true;
@@ -1992,11 +1990,14 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
};
/* IOMMU API */
-static bool arm_smmu_capable(enum iommu_cap cap)
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- return true;
+ /* Assume that a coherent TCU implies coherent TBUs */
+ return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -3694,43 +3695,6 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
return SZ_128K;
}
-static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
-{
- int err;
-
-#ifdef CONFIG_PCI
- if (pci_bus_type.iommu_ops != ops) {
- err = bus_set_iommu(&pci_bus_type, ops);
- if (err)
- return err;
- }
-#endif
-#ifdef CONFIG_ARM_AMBA
- if (amba_bustype.iommu_ops != ops) {
- err = bus_set_iommu(&amba_bustype, ops);
- if (err)
- goto err_reset_pci_ops;
- }
-#endif
- if (platform_bus_type.iommu_ops != ops) {
- err = bus_set_iommu(&platform_bus_type, ops);
- if (err)
- goto err_reset_amba_ops;
- }
-
- return 0;
-
-err_reset_amba_ops:
-#ifdef CONFIG_ARM_AMBA
- bus_set_iommu(&amba_bustype, NULL);
-#endif
-err_reset_pci_ops: __maybe_unused;
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
- return err;
-}
-
static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
resource_size_t size)
{
@@ -3869,27 +3833,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- goto err_sysfs_remove;
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ret;
}
- ret = arm_smmu_set_bus_ops(&arm_smmu_ops);
- if (ret)
- goto err_unregister_device;
-
return 0;
-
-err_unregister_device:
- iommu_device_unregister(&smmu->iommu);
-err_sysfs_remove:
- iommu_device_sysfs_remove(&smmu->iommu);
- return ret;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
- arm_smmu_set_bus_ops(NULL);
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dfa82df00342..6c1114a4d6cc 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -21,7 +21,6 @@
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -37,10 +36,10 @@
#include <linux/ratelimit.h>
#include <linux/slab.h>
-#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>
#include "arm-smmu.h"
+#include "../../dma-iommu.h"
/*
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
@@ -93,8 +92,6 @@ static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
-static int arm_smmu_bus_init(struct iommu_ops *ops);
-
static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -180,20 +177,6 @@ static int arm_smmu_register_legacy_master(struct device *dev,
kfree(sids);
return err;
}
-
-/*
- * With the legacy DT binding in play, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no probe_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
- if (using_legacy_binding)
- return arm_smmu_bus_init(&arm_smmu_ops);
- return 0;
-}
-device_initcall_sync(arm_smmu_legacy_bus_init);
#else
static int arm_smmu_register_legacy_master(struct device *dev,
struct arm_smmu_device **smmu)
@@ -1330,15 +1313,14 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return ops->iova_to_phys(ops, iova);
}
-static bool arm_smmu_capable(enum iommu_cap cap)
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- /*
- * Return true here as the SMMU can always send out coherent
- * requests.
- */
- return true;
+ /* Assume that a coherent TCU implies coherent TBUs */
+ return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -2016,52 +1998,6 @@ static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
return 0;
}
-static int arm_smmu_bus_init(struct iommu_ops *ops)
-{
- int err;
-
- /* Oh, for a proper bus abstraction */
- if (!iommu_present(&platform_bus_type)) {
- err = bus_set_iommu(&platform_bus_type, ops);
- if (err)
- return err;
- }
-#ifdef CONFIG_ARM_AMBA
- if (!iommu_present(&amba_bustype)) {
- err = bus_set_iommu(&amba_bustype, ops);
- if (err)
- goto err_reset_platform_ops;
- }
-#endif
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- err = bus_set_iommu(&pci_bus_type, ops);
- if (err)
- goto err_reset_amba_ops;
- }
-#endif
-#ifdef CONFIG_FSL_MC_BUS
- if (!iommu_present(&fsl_mc_bus_type)) {
- err = bus_set_iommu(&fsl_mc_bus_type, ops);
- if (err)
- goto err_reset_pci_ops;
- }
-#endif
- return 0;
-
-err_reset_pci_ops: __maybe_unused;
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
-err_reset_amba_ops: __maybe_unused;
-#ifdef CONFIG_ARM_AMBA
- bus_set_iommu(&amba_bustype, NULL);
-#endif
-err_reset_platform_ops: __maybe_unused;
- bus_set_iommu(&platform_bus_type, NULL);
- return err;
-}
-
static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
{
struct list_head rmr_list;
@@ -2226,7 +2162,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
- goto err_sysfs_remove;
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return err;
}
platform_set_drvdata(pdev, smmu);
@@ -2248,24 +2185,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
}
- /*
- * For ACPI and generic DT bindings, an SMMU will be probed before
- * any device which might need it, so we want the bus ops in place
- * ready to handle default domain setup as soon as any SMMU exists.
- */
- if (!using_legacy_binding) {
- err = arm_smmu_bus_init(&arm_smmu_ops);
- if (err)
- goto err_unregister_device;
- }
-
return 0;
-
-err_unregister_device:
- iommu_device_unregister(&smmu->iommu);
-err_sysfs_remove:
- iommu_device_sysfs_remove(&smmu->iommu);
- return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -2278,7 +2198,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_notice(&pdev->dev, "disabling translation\n");
- arm_smmu_bus_init(NULL);
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 17235116d3bb..3869c3ecda8c 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -493,7 +493,7 @@ static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static bool qcom_iommu_capable(enum iommu_cap cap)
+static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -837,8 +837,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
goto err_pm_disable;
}
- bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
-
if (qcom_iommu->local_base) {
pm_runtime_get_sync(dev);
writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
@@ -856,8 +854,6 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
{
struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
- bus_set_iommu(&platform_bus_type, NULL);
-
pm_runtime_force_suspend(&pdev->dev);
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&qcom_iommu->iommu);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 17dd683b2fce..9297b741f5e8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -13,7 +13,6 @@
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
@@ -30,6 +29,8 @@
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
+#include "dma-iommu.h"
+
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
@@ -1633,6 +1634,13 @@ out_free_page:
return NULL;
}
+/**
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
+ * @desc: MSI descriptor, will store the MSI page
+ * @msi_addr: MSI target address to be mapped
+ *
+ * Return: 0 on success or negative error code if the mapping failed.
+ */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
@@ -1661,8 +1669,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
return 0;
}
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg)
+/**
+ * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
+ * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
+ * @msg: MSI message containing target physical address
+ */
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
new file mode 100644
index 000000000000..942790009292
--- /dev/null
+++ b/drivers/iommu/dma-iommu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-2015 ARM Ltd.
+ */
+#ifndef __DMA_IOMMU_H
+#define __DMA_IOMMU_H
+
+#include <linux/iommu.h>
+
+#ifdef CONFIG_IOMMU_DMA
+
+int iommu_get_dma_cookie(struct iommu_domain *domain);
+void iommu_put_dma_cookie(struct iommu_domain *domain);
+
+int iommu_dma_init_fq(struct iommu_domain *domain);
+
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+
+extern bool iommu_dma_forcedac;
+
+#else /* CONFIG_IOMMU_DMA */
+
+static inline int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+ return -EINVAL;
+}
+
+static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+#endif /* __DMA_IOMMU_H */
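/*
 * With the header made private to drivers/iommu/, in-tree users now
 * include it by relative path instead of <linux/dma-iommu.h>, as the
 * hunks elsewhere in this series show:
 */
#include "dma-iommu.h"		/* from drivers/iommu/dma-iommu.c */
#include "../dma-iommu.h"	/* from drivers/iommu/intel/iommu.c */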
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 8e18984a0c4f..45fd4850bacb 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1446,16 +1446,7 @@ static int __init exynos_iommu_init(void)
goto err_zero_lv2;
}
- ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
- if (ret) {
- pr_err("%s: Failed to register exynos-iommu driver.\n",
- __func__);
- goto err_set_iommu;
- }
-
return 0;
-err_set_iommu:
- kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 011f9ab7f743..fa20f4b03e12 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -178,7 +178,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
return iova;
}
-static bool fsl_pamu_capable(enum iommu_cap cap)
+static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
{
return cap == IOMMU_CAP_CACHE_COHERENCY;
}
@@ -476,11 +476,7 @@ int __init pamu_domain_init(void)
if (ret) {
iommu_device_sysfs_remove(&pamu_iommu);
pr_err("Can't register iommu device\n");
- return ret;
}
- bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
- bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
-
return ret;
}
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 39a06d245f12..b7dff5092fd2 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -19,8 +19,9 @@ config INTEL_IOMMU
select DMAR_TABLE
select SWIOTLB
select IOASID
- select IOMMU_DMA
select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
@@ -48,10 +49,7 @@ config INTEL_IOMMU_DEBUGFS
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on X86_64
- select PCI_PASID
- select PCI_PRI
select MMU_NOTIFIER
- select IOASID
select IOMMU_SVA
help
Shared Virtual Memory (SVM) provides a facility for devices
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index 3ee68393122f..806986696841 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -37,7 +37,7 @@ static inline void check_dmar_capabilities(struct intel_iommu *a,
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
@@ -84,7 +84,7 @@ static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type
goto out;
}
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 31bc50e538a3..a8b36c3fddf1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -15,7 +15,6 @@
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/dmi.h>
#include <linux/intel-svm.h>
#include <linux/memory.h>
@@ -26,6 +25,7 @@
#include <linux/tboot.h>
#include "iommu.h"
+#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "pasid.h"
@@ -199,6 +199,11 @@ static inline void context_set_domain_id(struct context_entry *context,
context->hi |= (value & ((1 << 16) - 1)) << 8;
}
+static inline void context_set_pasid(struct context_entry *context)
+{
+ context->lo |= CONTEXT_PASIDE;
+}
+
static inline int context_domain_id(struct context_entry *c)
{
return((c->hi >> 8) & 0xffff);
@@ -399,7 +404,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{
unsigned long fl_sagaw, sl_sagaw;
- fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+ fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */
@@ -1234,6 +1239,13 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ /*
+ * Hardware invalidates all DMA remapping hardware translation
+ * caches as part of SRTP flow.
+ */
+ if (cap_esrtps(iommu->cap))
+ return;
+
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
if (sm_supported(iommu))
qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
@@ -1350,21 +1362,18 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
}
static struct device_domain_info *
-iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
+domain_lookup_dev_info(struct dmar_domain *domain,
+ struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct device_domain_info *info;
unsigned long flags;
- if (!iommu->qi)
- return NULL;
-
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
spin_unlock_irqrestore(&domain->lock, flags);
- return info->ats_supported ? info : NULL;
+ return info;
}
}
spin_unlock_irqrestore(&domain->lock, flags);
@@ -1389,7 +1398,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
spin_unlock_irqrestore(&domain->lock, flags);
}
-static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+static void iommu_enable_pci_caps(struct device_domain_info *info)
{
struct pci_dev *pdev;
@@ -1412,7 +1421,6 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
info->pfsid = pci_dev_id(pf_pdev);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
the device if you enable PASID support after ATS support is
undefined. So always enable PASID support on devices which
@@ -1425,7 +1433,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
(info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
!pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
info->pri_enabled = 1;
-#endif
+
if (info->ats_supported && pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
@@ -1448,16 +1456,16 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
info->ats_enabled = 0;
domain_update_iotlb(info->domain);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
+
if (info->pri_enabled) {
pci_disable_pri(pdev);
info->pri_enabled = 0;
}
+
if (info->pasid_enabled) {
pci_disable_pasid(pdev);
info->pasid_enabled = 0;
}
-#endif
}
static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
@@ -1907,7 +1915,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
u8 bus, u8 devfn)
{
struct device_domain_info *info =
- iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+ domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
struct context_entry *context;
@@ -1980,6 +1988,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_set_sm_dte(context);
if (info && info->pri_supported)
context_set_sm_pre(context);
+ if (info && info->pasid_supported)
+ context_set_pasid(context);
} else {
struct dma_pte *pgd = domain->pgd;
int agaw;
@@ -2037,7 +2047,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
} else {
iommu_flush_write_buffer(iommu);
}
- iommu_enable_dev_iotlb(info);
+ iommu_enable_pci_caps(info);
ret = 0;
@@ -3896,7 +3906,6 @@ static int __init probe_acpi_namespace_devices(void)
continue;
}
- pn->dev->bus->iommu_ops = &intel_iommu_ops;
ret = iommu_probe_device(pn->dev);
if (ret)
break;
@@ -4029,7 +4038,6 @@ int __init intel_iommu_init(void)
}
up_read(&dmar_global_lock);
- bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
@@ -4437,7 +4445,7 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
}
-static bool intel_iommu_capable(enum iommu_cap cap)
+static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
return true;
@@ -4457,7 +4465,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
+ if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -4574,52 +4582,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
list_add_tail(&reg->list, head);
}
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct context_entry *context;
- struct dmar_domain *domain;
- u64 ctx_lo;
- int ret;
-
- domain = info->domain;
- if (!domain)
- return -EINVAL;
-
- spin_lock(&iommu->lock);
- ret = -EINVAL;
- if (!info->pasid_supported)
- goto out;
-
- context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
- if (WARN_ON(!context))
- goto out;
-
- ctx_lo = context[0].lo;
-
- if (!(ctx_lo & CONTEXT_PASIDE)) {
- ctx_lo |= CONTEXT_PASIDE;
- context[0].lo = ctx_lo;
- wmb();
- iommu->flush.flush_context(iommu,
- domain_id_iommu(domain, iommu),
- PCI_DEVID(info->bus, info->devfn),
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- }
-
- /* Enable PASID support in the device, if it wasn't already */
- if (!info->pasid_enabled)
- iommu_enable_dev_iotlb(info);
-
- ret = 0;
-
- out:
- spin_unlock(&iommu->lock);
-
- return ret;
-}
-
static struct iommu_group *intel_iommu_device_group(struct device *dev)
{
if (dev_is_pci(dev))
@@ -4643,9 +4605,6 @@ static int intel_iommu_enable_sva(struct device *dev)
if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
return -ENODEV;
- if (intel_iommu_enable_pasid(iommu, dev))
- return -ENODEV;
-
if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
return -EINVAL;
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 74b0e19e23ee..92023dff9513 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -146,7 +146,9 @@
/*
* Decoding Capability Register
*/
-#define cap_5lp_support(c) (((c) >> 60) & 1)
+#define cap_esrtps(c) (((c) >> 63) & 1)
+#define cap_esirtps(c) (((c) >> 62) & 1)
+#define cap_fl5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
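/*
 * The new accessors are single-bit extractions from the 64-bit
 * capability register. A sketch of the enhanced-SRTP checks they
 * enable (the register value is illustrative only):
 */
static bool example_esrtps_checks(void)
{
	u64 cap = BIT_ULL(63) | BIT_ULL(62);	/* ESRTPS and ESIRTPS set */

	return cap_esrtps(cap) && cap_esirtps(cap);	/* true */
}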
@@ -586,6 +588,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU_SVM
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ unsigned long prq_seq_number;
struct completion prq_complete;
struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
@@ -741,7 +744,6 @@ extern int dmar_ir_support(void);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -761,7 +763,6 @@ struct intel_svm_dev {
struct device *dev;
struct intel_iommu *iommu;
struct iommu_sva sva;
- unsigned long prq_seq_number;
u32 pasid;
int users;
u16 did;
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 2e9683e970f8..5962bb5027d0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -494,7 +494,8 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
* Global invalidation of interrupt entry cache to make sure the
* hardware uses the new irq remapping table.
*/
- qi_global_iec(iommu);
+ if (!cap_esirtps(iommu->cap))
+ qi_global_iec(iommu);
}
static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
@@ -680,7 +681,8 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
* global invalidation of interrupt entry cache before disabling
* interrupt-remapping.
*/
- qi_global_iec(iommu);
+ if (!cap_esirtps(iommu->cap))
+ qi_global_iec(iommu);
raw_spin_lock_irqsave(&iommu->register_lock, flags);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c5e7e8b020a5..c30ddac40ee5 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -392,16 +392,6 @@ pasid_set_flpm(struct pasid_entry *pe, u64 value)
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
-/*
- * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_eafe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
-}
-
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, u32 pasid)
@@ -529,7 +519,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
}
- if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+ if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
pr_err("No 5-level paging support for first-level on %s\n",
iommu->name);
return -EINVAL;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 8bcfb93dda56..7d08eb034f2d 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -49,23 +49,6 @@ static void *pasid_private_find(ioasid_t pasid)
}
static struct intel_svm_dev *
-svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
-{
- struct intel_svm_dev *sdev = NULL, *t;
-
- rcu_read_lock();
- list_for_each_entry_rcu(t, &svm->devs, list) {
- if (t->sid == sid) {
- sdev = t;
- break;
- }
- }
- rcu_read_unlock();
-
- return sdev;
-}
-
-static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
struct intel_svm_dev *sdev = NULL, *t;
@@ -181,7 +164,7 @@ void intel_svm_check(struct intel_iommu *iommu)
}
if (cpu_feature_enabled(X86_FEATURE_LA57) &&
- !cap_5lp_support(iommu->cap)) {
+ !cap_fl5lp_support(iommu->cap)) {
pr_err("%s SVM disabled, incompatible paging mode\n",
iommu->name);
return;
@@ -706,11 +689,10 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
static irqreturn_t prq_event_thread(int irq, void *d)
{
- struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d;
- struct intel_svm *svm = NULL;
struct page_req_dsc *req;
int head, tail, handled;
+ struct pci_dev *pdev;
u64 address;
/*
@@ -730,8 +712,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
pr_err("IOMMU: %s: Page request without PASID\n",
iommu->name);
bad_req:
- svm = NULL;
- sdev = NULL;
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
goto prq_advance;
}
@@ -758,34 +738,19 @@ bad_req:
if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
goto prq_advance;
- if (!svm || svm->pasid != req->pasid) {
- /*
- * It can't go away, because the driver is not permitted
- * to unbind the mm while any page faults are outstanding.
- */
- svm = pasid_private_find(req->pasid);
- if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
- goto bad_req;
- }
-
- if (!sdev || sdev->sid != req->rid) {
- sdev = svm_lookup_device_by_sid(svm, req->rid);
- if (!sdev)
- goto bad_req;
- }
-
- sdev->prq_seq_number++;
-
+ pdev = pci_get_domain_bus_and_slot(iommu->segment,
+ PCI_BUS_NUM(req->rid),
+ req->rid & 0xff);
/*
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (intel_svm_prq_report(iommu, sdev->dev, req))
+ if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
- trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
+ trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
req->priv_data[0], req->priv_data[1],
- sdev->prq_seq_number);
+ iommu->prq_seq_number++);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -881,8 +846,6 @@ int intel_svm_page_response(struct device *dev,
struct iommu_page_response *msg)
{
struct iommu_fault_page_request *prm;
- struct intel_svm_dev *sdev = NULL;
- struct intel_svm *svm = NULL;
struct intel_iommu *iommu;
bool private_present;
bool pasid_present;
@@ -901,8 +864,6 @@ int intel_svm_page_response(struct device *dev,
if (!msg || !evt)
return -EINVAL;
- mutex_lock(&pasid_mutex);
-
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@@ -919,12 +880,6 @@ int intel_svm_page_response(struct device *dev,
goto out;
}
- ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
- if (ret || !sdev) {
- ret = -ENODEV;
- goto out;
- }
-
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
@@ -954,6 +909,5 @@ int intel_svm_page_response(struct device *dev,
qi_submit_sync(iommu, &desc, 1, 0);
}
out:
- mutex_unlock(&pasid_mutex);
return ret;
}
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 94ff319ae8ac..0ba817e86346 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -130,9 +130,6 @@
#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
-#define APPLE_DART_PTE_PROT_NO_WRITE (1<<7)
-#define APPLE_DART_PTE_PROT_NO_READ (1<<8)
-
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -200,8 +197,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *pages;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
- gfp | __GFP_ZERO, order);
+ p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
if (!p)
return NULL;
@@ -406,15 +402,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
{
arm_lpae_iopte pte;
- if (data->iop.fmt == APPLE_DART) {
- pte = 0;
- if (!(prot & IOMMU_WRITE))
- pte |= APPLE_DART_PTE_PROT_NO_WRITE;
- if (!(prot & IOMMU_READ))
- pte |= APPLE_DART_PTE_PROT_NO_READ;
- return pte;
- }
-
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_nG;
@@ -1107,52 +1094,6 @@ out_free_data:
return NULL;
}
-static struct io_pgtable *
-apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
-{
- struct arm_lpae_io_pgtable *data;
- int i;
-
- if (cfg->oas > 36)
- return NULL;
-
- data = arm_lpae_alloc_pgtable(cfg);
- if (!data)
- return NULL;
-
- /*
- * The table format itself always uses two levels, but the total VA
- * space is mapped by four separate tables, making the MMIO registers
- * an effective "level 1". For simplicity, though, we treat this
- * equivalently to LPAE stage 2 concatenation at level 2, with the
- * additional TTBRs each just pointing at consecutive pages.
- */
- if (data->start_level < 1)
- goto out_free_data;
- if (data->start_level == 1 && data->pgd_bits > 2)
- goto out_free_data;
- if (data->start_level > 1)
- data->pgd_bits = 0;
- data->start_level = 2;
- cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits;
- data->pgd_bits += data->bits_per_level;
-
- data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
- cfg);
- if (!data->pgd)
- goto out_free_data;
-
- for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i)
- cfg->apple_dart_cfg.ttbr[i] =
- virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data));
-
- return &data->iop;
-
-out_free_data:
- kfree(data);
- return NULL;
-}
-
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
@@ -1178,11 +1119,6 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
.free = arm_lpae_free_pgtable,
};
-struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
- .alloc = apple_dart_alloc_pgtable,
- .free = arm_lpae_free_pgtable,
-};
-
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
static struct io_pgtable_cfg *cfg_cookie __initdata;
@@ -1343,12 +1279,17 @@ static int __init arm_lpae_do_selftests(void)
};
int i, j, pass = 0, fail = 0;
+ struct device dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 48,
.coherent_walk = true,
+ .iommu_dev = &dev,
};
+ /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
+ set_dev_node(&dev, NUMA_NO_NODE);
+
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(ias); ++j) {
cfg.pgsize_bitmap = pgsize[i];
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
new file mode 100644
index 000000000000..74b1ef2b96be
--- /dev/null
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple DART page table allocator.
+ *
+ * Copyright (C) 2022 The Asahi Linux Contributors
+ *
+ * Based on io-pgtable-arm.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "dart io-pgtable: " fmt
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+
+#define DART1_MAX_ADDR_BITS 36
+
+#define DART_MAX_TABLES 4
+#define DART_LEVELS 2
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct dart_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+#define DART_GRANULE(d) \
+ (sizeof(dart_iopte) << (d)->bits_per_level)
+#define DART_PTES_PER_TABLE(d) \
+ (DART_GRANULE(d) >> ilog2(sizeof(dart_iopte)))
+
+#define APPLE_DART_PTE_SUBPAGE_START GENMASK_ULL(63, 52)
+#define APPLE_DART_PTE_SUBPAGE_END GENMASK_ULL(51, 40)
+
+#define APPLE_DART1_PADDR_MASK GENMASK_ULL(35, 12)
+#define APPLE_DART2_PADDR_MASK GENMASK_ULL(37, 10)
+#define APPLE_DART2_PADDR_SHIFT (4)
+
+/* Apple DART1 protection bits */
+#define APPLE_DART1_PTE_PROT_NO_READ BIT(8)
+#define APPLE_DART1_PTE_PROT_NO_WRITE BIT(7)
+#define APPLE_DART1_PTE_PROT_SP_DIS BIT(1)
+
+/* Apple DART2 protection bits */
+#define APPLE_DART2_PTE_PROT_NO_READ BIT(3)
+#define APPLE_DART2_PTE_PROT_NO_WRITE BIT(2)
+#define APPLE_DART2_PTE_PROT_NO_CACHE BIT(1)
+
+/* marks PTE as valid */
+#define APPLE_DART_PTE_VALID BIT(0)
+
+/* IOPTE accessors */
+#define iopte_deref(pte, d) __va(iopte_to_paddr(pte, d))
+
+struct dart_io_pgtable {
+ struct io_pgtable iop;
+
+ int tbl_bits;
+ int bits_per_level;
+
+ void *pgd[DART_MAX_TABLES];
+};
+
+typedef u64 dart_iopte;
+
+
+static dart_iopte paddr_to_iopte(phys_addr_t paddr,
+ struct dart_io_pgtable *data)
+{
+ dart_iopte pte;
+
+ if (data->iop.fmt == APPLE_DART)
+ return paddr & APPLE_DART1_PADDR_MASK;
+
+ /* format is APPLE_DART2 */
+ pte = paddr >> APPLE_DART2_PADDR_SHIFT;
+ pte &= APPLE_DART2_PADDR_MASK;
+
+ return pte;
+}
+
+static phys_addr_t iopte_to_paddr(dart_iopte pte,
+ struct dart_io_pgtable *data)
+{
+ u64 paddr;
+
+ if (data->iop.fmt == APPLE_DART)
+ return pte & APPLE_DART1_PADDR_MASK;
+
+ /* format is APPLE_DART2 */
+ paddr = pte & APPLE_DART2_PADDR_MASK;
+ paddr <<= APPLE_DART2_PADDR_SHIFT;
+
+ return paddr;
+}
+
+static void *__dart_alloc_pages(size_t size, gfp_t gfp,
+ struct io_pgtable_cfg *cfg)
+{
+ int order = get_order(size);
+ struct page *p;
+
+ VM_BUG_ON((gfp & __GFP_HIGHMEM));
+ p = alloc_pages(gfp | __GFP_ZERO, order);
+ if (!p)
+ return NULL;
+
+ return page_address(p);
+}
+
+static int dart_init_pte(struct dart_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ dart_iopte prot, int num_entries,
+ dart_iopte *ptep)
+{
+ int i;
+ dart_iopte pte = prot;
+ size_t sz = data->iop.cfg.pgsize_bitmap;
+
+ for (i = 0; i < num_entries; i++)
+ if (ptep[i] & APPLE_DART_PTE_VALID) {
+ /* We require an unmap first */
+ WARN_ON(ptep[i] & APPLE_DART_PTE_VALID);
+ return -EEXIST;
+ }
+
+ /* subpage protection: always allow access to the entire page */
+ pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0);
+ pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);
+
+ pte |= APPLE_DART1_PTE_PROT_SP_DIS;
+ pte |= APPLE_DART_PTE_VALID;
+
+ for (i = 0; i < num_entries; i++)
+ ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
+
+ return 0;
+}
+
+static dart_iopte dart_install_table(dart_iopte *table,
+ dart_iopte *ptep,
+ dart_iopte curr,
+ struct dart_io_pgtable *data)
+{
+ dart_iopte old, new;
+
+ new = paddr_to_iopte(__pa(table), data) | APPLE_DART_PTE_VALID;
+
+ /*
+ * Ensure the table itself is visible before its PTE can be.
+ * Whilst we could get away with cmpxchg64_release below, this
+ * doesn't have any ordering semantics when !CONFIG_SMP.
+ */
+ dma_wmb();
+
+ old = cmpxchg64_relaxed(ptep, curr, new);
+
+ return old;
+}
+
+static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
+{
+ return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->tbl_bits) - 1);
+}
+
+static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
+{
+ return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
+}
+
+static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
+{
+ return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
+}
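
To make the shift arithmetic concrete: with 16 KiB pages, pg_shift is 14 and
bits_per_level is 14 - ilog2(sizeof(dart_iopte)) = 11, so the L2 index is iova
bits 24:14, the L1 index bits 35:25, and the table selector sits above bit 36.
A standalone sketch with that assumed geometry (not tied to any particular SoC):

/* Decompose an IOVA for a 2-level DART with 16 KiB pages. */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LEVEL	11
#define PTE_SHIFT	3	/* ilog2(sizeof(dart_iopte)) */

int main(void)
{
	uint64_t iova = 0x123456c000ULL;
	unsigned int l2  = (iova >> (BITS_PER_LEVEL + PTE_SHIFT)) &
			   ((1 << BITS_PER_LEVEL) - 1);
	unsigned int l1  = (iova >> (2 * BITS_PER_LEVEL + PTE_SHIFT)) &
			   ((1 << BITS_PER_LEVEL) - 1);
	unsigned int tbl = iova >> (3 * BITS_PER_LEVEL + PTE_SHIFT);

	printf("tbl=%u l1=%u l2=%u\n", tbl, l1, l2);
	return 0;
}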
+
+static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
+{
+ dart_iopte pte, *ptep;
+ int tbl = dart_get_table(data, iova);
+
+ ptep = data->pgd[tbl];
+ if (!ptep)
+ return NULL;
+
+ ptep += dart_get_l1_index(data, iova);
+ pte = READ_ONCE(*ptep);
+
+ /* Valid entry? */
+ if (!pte)
+ return NULL;
+
+ /* Deref to get level 2 table */
+ return iopte_deref(pte, data);
+}
+
+static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
+ int prot)
+{
+ dart_iopte pte = 0;
+
+ if (data->iop.fmt == APPLE_DART) {
+ if (!(prot & IOMMU_WRITE))
+ pte |= APPLE_DART1_PTE_PROT_NO_WRITE;
+ if (!(prot & IOMMU_READ))
+ pte |= APPLE_DART1_PTE_PROT_NO_READ;
+ }
+ if (data->iop.fmt == APPLE_DART2) {
+ if (!(prot & IOMMU_WRITE))
+ pte |= APPLE_DART2_PTE_PROT_NO_WRITE;
+ if (!(prot & IOMMU_READ))
+ pte |= APPLE_DART2_PTE_PROT_NO_READ;
+ if (!(prot & IOMMU_CACHE))
+ pte |= APPLE_DART2_PTE_PROT_NO_CACHE;
+ }
+
+ return pte;
+}
+
+static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ size_t tblsz = DART_GRANULE(data);
+ int ret = 0, tbl, num_entries, max_entries, map_idx_start;
+ dart_iopte pte, *cptep, *ptep;
+ dart_iopte prot;
+
+ if (WARN_ON(pgsize != cfg->pgsize_bitmap))
+ return -EINVAL;
+
+ if (WARN_ON(paddr >> cfg->oas))
+ return -ERANGE;
+
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ tbl = dart_get_table(data, iova);
+
+ ptep = data->pgd[tbl];
+ ptep += dart_get_l1_index(data, iova);
+ pte = READ_ONCE(*ptep);
+
+ /* no L2 table present */
+ if (!pte) {
+ cptep = __dart_alloc_pages(tblsz, gfp, cfg);
+ if (!cptep)
+ return -ENOMEM;
+
+ pte = dart_install_table(cptep, ptep, 0, data);
+ if (pte)
+ free_pages((unsigned long)cptep, get_order(tblsz));
+
+ /* L2 table is present (now) */
+ pte = READ_ONCE(*ptep);
+ }
+
+ ptep = iopte_deref(pte, data);
+
+ /* install leaf entries into the L2 table */
+ prot = dart_prot_to_pte(data, iommu_prot);
+ map_idx_start = dart_get_l2_index(data, iova);
+ max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+ ptep += map_idx_start;
+ ret = dart_init_pte(data, iova, paddr, prot, num_entries, ptep);
+ if (!ret && mapped)
+ *mapped += num_entries * pgsize;
+
+ /*
+ * Synchronise all PTE updates for the new mapping before there's
+ * a chance for anything to kick off a table walk for the new iova.
+ */
+ wmb();
+
+ return ret;
+}
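
Note that a single map_pages() call never crosses an L2 table: num_entries is
clamped to the slots left in the current table, *mapped records how far the
call got, and the caller retries for the remainder. A toy illustration of the
clamping, assuming 2048 PTEs per table (the 16 KiB-granule case):

#include <stdio.h>

int main(void)
{
	int ptes_per_table = 2048, map_idx_start = 2040, pgcount = 16;
	int max_entries = ptes_per_table - map_idx_start;
	int num_entries = pgcount < max_entries ? pgcount : max_entries;

	printf("mapped %d of %d pages; the rest needs another call\n",
	       num_entries, pgcount);
	return 0;
}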
+
+static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ int i = 0, num_entries, max_entries, unmap_idx_start;
+ dart_iopte pte, *ptep;
+
+ if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
+ return 0;
+
+ ptep = dart_get_l2(data, iova);
+
+ /* Valid L2 IOPTE pointer? */
+ if (WARN_ON(!ptep))
+ return 0;
+
+ unmap_idx_start = dart_get_l2_index(data, iova);
+ ptep += unmap_idx_start;
+
+ max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+
+ while (i < num_entries) {
+ pte = READ_ONCE(*ptep);
+ if (WARN_ON(!pte))
+ break;
+
+ /* clear pte */
+ *ptep = 0;
+
+ if (!iommu_iotlb_gather_queued(gather))
+ io_pgtable_tlb_add_page(&data->iop, gather,
+ iova + i * pgsize, pgsize);
+
+ ptep++;
+ i++;
+ }
+
+ return i * pgsize;
+}
+
+static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ dart_iopte pte, *ptep;
+
+ ptep = dart_get_l2(data, iova);
+
+ /* Valid L2 IOPTE pointer? */
+ if (!ptep)
+ return 0;
+
+ ptep += dart_get_l2_index(data, iova);
+
+ pte = READ_ONCE(*ptep);
+ /* Found translation */
+ if (pte) {
+ iova &= (data->iop.cfg.pgsize_bitmap - 1);
+ return iopte_to_paddr(pte, data) | iova;
+ }
+
+ /* Ran out of page tables to walk */
+ return 0;
+}
+
+static struct dart_io_pgtable *
+dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
+{
+ struct dart_io_pgtable *data;
+ int tbl_bits, bits_per_level, va_bits, pg_shift;
+
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
+
+ va_bits = cfg->ias - pg_shift;
+
+ tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
+ if ((1 << tbl_bits) > DART_MAX_TABLES)
+ return NULL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->tbl_bits = tbl_bits;
+ data->bits_per_level = bits_per_level;
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map_pages = dart_map_pages,
+ .unmap_pages = dart_unmap_pages,
+ .iova_to_phys = dart_iova_to_phys,
+ };
+
+ return data;
+}
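
The TTBR count follows directly from this geometry: tbl_bits = max(0, va_bits
- 2 * bits_per_level), so with 16 KiB pages a 36-bit IAS fits in one TTBR
while a 38-bit IAS needs all four. A quick userspace sketch of that
arithmetic (page size assumed):

#include <stdio.h>

int main(void)
{
	int pg_shift = 14, bits_per_level = pg_shift - 3, levels = 2;

	for (int ias = 36; ias <= 38; ias++) {
		int va_bits = ias - pg_shift;
		int tbl_bits = va_bits - bits_per_level * levels;

		if (tbl_bits < 0)
			tbl_bits = 0;
		printf("ias=%d -> %d TTBR(s)\n", ias, 1 << tbl_bits);
	}
	return 0;
}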
+
+static struct io_pgtable *
+apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct dart_io_pgtable *data;
+ int i;
+
+ if (!cfg->coherent_walk)
+ return NULL;
+
+ if (cfg->oas != 36 && cfg->oas != 42)
+ return NULL;
+
+ if (cfg->ias > cfg->oas)
+ return NULL;
+
+ if (!(cfg->pgsize_bitmap == SZ_4K || cfg->pgsize_bitmap == SZ_16K))
+ return NULL;
+
+ data = dart_alloc_pgtable(cfg);
+ if (!data)
+ return NULL;
+
+ cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+
+ for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
+ data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL,
+ cfg);
+ if (!data->pgd[i])
+ goto out_free_data;
+ cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
+ }
+
+ return &data->iop;
+
+out_free_data:
+ while (--i >= 0)
+ free_pages((unsigned long)data->pgd[i],
+ get_order(DART_GRANULE(data)));
+ kfree(data);
+ return NULL;
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+ struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+ dart_iopte *ptep, *end;
+ int i;
+
+ for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
+ ptep = data->pgd[i];
+ end = (void *)ptep + DART_GRANULE(data);
+
+ while (ptep != end) {
+ dart_iopte pte = *ptep++;
+
+ if (pte) {
+ unsigned long page =
+ (unsigned long)iopte_deref(pte, data);
+
+ free_pages(page, get_order(DART_GRANULE(data)));
+ }
+ }
+ free_pages((unsigned long)data->pgd[i],
+ get_order(DART_GRANULE(data)));
+ }
+
+ kfree(data);
+}
+
+struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
+ .alloc = apple_dart_alloc_pgtable,
+ .free = apple_dart_free_pgtable,
+};
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index f4bfcef98297..b843fcd365d2 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -20,13 +20,17 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
[ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
+#endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_DART
[APPLE_DART] = &io_pgtable_apple_dart_init_fns,
+ [APPLE_DART2] = &io_pgtable_apple_dart_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif
#ifdef CONFIG_AMD_IOMMU
[AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
+ [AMD_IOMMU_V2] = &io_pgtable_amd_iommu_v2_init_fns,
#endif
};
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3a808146b50f..4893c2429ca5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -6,8 +6,8 @@
#define pr_fmt(fmt) "iommu: " fmt
+#include <linux/amba/bus.h>
#include <linux/device.h>
-#include <linux/dma-iommu.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
@@ -16,17 +16,21 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
+#include "dma-iommu.h"
+
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -75,6 +79,8 @@ static const char * const iommu_group_resv_type_string[] = {
#define IOMMU_CMD_LINE_DMA_API BIT(0)
#define IOMMU_CMD_LINE_STRICT BIT(1)
+static int iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -103,6 +109,22 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
+static struct bus_type * const iommu_buses[] = {
+ &platform_bus_type,
+#ifdef CONFIG_PCI
+ &pci_bus_type,
+#endif
+#ifdef CONFIG_ARM_AMBA
+ &amba_bustype,
+#endif
+#ifdef CONFIG_FSL_MC_BUS
+ &fsl_mc_bus_type,
+#endif
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+ &host1x_context_device_bus_type,
+#endif
+};
+
/*
* Use a function instead of an array here because the domain-type is a
* bit-field, so an array would waste memory.
@@ -126,6 +148,8 @@ static const char *iommu_domain_type_str(unsigned int t)
static int __init iommu_subsys_init(void)
{
+ struct notifier_block *nb;
+
if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
iommu_set_default_passthrough(false);
@@ -152,10 +176,27 @@ static int __init iommu_subsys_init(void)
(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
"(set via kernel command line)" : "");
+ nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
+ if (!nb)
+ return -ENOMEM;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
+ nb[i].notifier_call = iommu_bus_notifier;
+ bus_register_notifier(iommu_buses[i], &nb[i]);
+ }
+
return 0;
}
subsys_initcall(iommu_subsys_init);
+static int remove_iommu_group(struct device *dev, void *data)
+{
+ if (dev->iommu && dev->iommu->iommu_dev == data)
+ iommu_release_device(dev);
+
+ return 0;
+}
+
/**
* iommu_device_register() - Register an IOMMU hardware instance
* @iommu: IOMMU handle for the instance
@@ -167,23 +208,42 @@ subsys_initcall(iommu_subsys_init);
int iommu_device_register(struct iommu_device *iommu,
const struct iommu_ops *ops, struct device *hwdev)
{
+ int err = 0;
+
/* We need to be able to take module references appropriately */
if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
return -EINVAL;
+ /*
+ * Temporarily enforce global restriction to a single driver. This was
+ * already the de-facto behaviour, since any possible combination of
+ * existing drivers would compete for at least the PCI or platform bus.
+ */
+ if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
+ return -EBUSY;
iommu->ops = ops;
if (hwdev)
- iommu->fwnode = hwdev->fwnode;
+ iommu->fwnode = dev_fwnode(hwdev);
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
- return 0;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
+ iommu_buses[i]->iommu_ops = ops;
+ err = bus_iommu_probe(iommu_buses[i]);
+ }
+ if (err)
+ iommu_device_unregister(iommu);
+ return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
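
With notifier registration and bus probing handled centrally here, a driver's
probe path reduces to a single call. A hypothetical skeleton (foo_iommu and
foo_iommu_ops are invented names for illustration, not from this series):

#include <linux/iommu.h>
#include <linux/platform_device.h>

struct foo_iommu {				/* hypothetical driver state */
	struct iommu_device iommu;
};

static const struct iommu_ops foo_iommu_ops;	/* assumed defined elsewhere */

static int foo_iommu_probe(struct platform_device *pdev)
{
	struct foo_iommu *f = devm_kzalloc(&pdev->dev, sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	ret = iommu_device_sysfs_add(&f->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	/* Registers the instance and probes devices on all iommu_buses[]. */
	ret = iommu_device_register(&f->iommu, &foo_iommu_ops, &pdev->dev);
	if (ret)
		iommu_device_sysfs_remove(&f->iommu);

	return ret;
}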
void iommu_device_unregister(struct iommu_device *iommu)
{
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
+ bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
+
spin_lock(&iommu_device_lock);
list_del(&iommu->list);
spin_unlock(&iommu_device_lock);
@@ -654,7 +714,6 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -1612,13 +1671,6 @@ static int probe_iommu_group(struct device *dev, void *data)
return ret;
}
-static int remove_iommu_group(struct device *dev, void *data)
-{
- iommu_release_device(dev);
-
- return 0;
-}
-
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -1775,75 +1827,6 @@ int bus_iommu_probe(struct bus_type *bus)
return ret;
}
-static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
-{
- struct notifier_block *nb;
- int err;
-
- nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
- if (!nb)
- return -ENOMEM;
-
- nb->notifier_call = iommu_bus_notifier;
-
- err = bus_register_notifier(bus, nb);
- if (err)
- goto out_free;
-
- err = bus_iommu_probe(bus);
- if (err)
- goto out_err;
-
-
- return 0;
-
-out_err:
- /* Clean up */
- bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
- bus_unregister_notifier(bus, nb);
-
-out_free:
- kfree(nb);
-
- return err;
-}
-
-/**
- * bus_set_iommu - set iommu-callbacks for the bus
- * @bus: bus.
- * @ops: the callbacks provided by the iommu-driver
- *
- * This function is called by an iommu driver to set the iommu methods
- * used for a particular bus. Drivers for devices on that bus can use
- * the iommu-api after these ops are registered.
- * This special function is needed because IOMMUs are usually devices on
- * the bus itself, so the iommu drivers are not initialized when the bus
- * is set up. With this function the iommu-driver can set the iommu-ops
- * afterwards.
- */
-int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
-{
- int err;
-
- if (ops == NULL) {
- bus->iommu_ops = NULL;
- return 0;
- }
-
- if (bus->iommu_ops != NULL)
- return -EBUSY;
-
- bus->iommu_ops = ops;
-
- /* Do IOMMU specific setup for this bus-type */
- err = iommu_bus_init(bus, ops);
- if (err)
- bus->iommu_ops = NULL;
-
- return err;
-}
-EXPORT_SYMBOL_GPL(bus_set_iommu);
-
bool iommu_present(struct bus_type *bus)
{
return bus->iommu_ops != NULL;
@@ -1869,19 +1852,10 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
if (!ops->capable)
return false;
- return ops->capable(cap);
+ return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
-bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
-{
- if (!bus->iommu_ops || !bus->iommu_ops->capable)
- return false;
-
- return bus->iommu_ops->capable(cap);
-}
-EXPORT_SYMBOL_GPL(iommu_capable);
-
/**
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 47d1983dfa2a..a44ad92fc5eb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -661,9 +661,6 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
unsigned long flags;
int i;
- if (!mag)
- return;
-
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (i = 0 ; i < mag->size; ++i) {
@@ -683,12 +680,12 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
static bool iova_magazine_full(struct iova_magazine *mag)
{
- return (mag && mag->size == IOVA_MAG_SIZE);
+ return mag->size == IOVA_MAG_SIZE;
}
static bool iova_magazine_empty(struct iova_magazine *mag)
{
- return (!mag || mag->size == 0);
+ return mag->size == 0;
}
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
@@ -697,8 +694,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
int i;
unsigned long pfn;
- BUG_ON(iova_magazine_empty(mag));
-
/* Only fall back to the rbtree if we have no suitable pfns at all */
for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
if (i == 0)
@@ -713,8 +708,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
- BUG_ON(iova_magazine_full(mag));
-
mag->pfns[mag->size++] = pfn;
}
@@ -882,7 +875,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{
unsigned int log_size = order_base_2(size);
- if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
return 0;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 1d42084d0276..3b30c0752274 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1090,11 +1090,6 @@ static int ipmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
if (ret)
return ret;
-
-#if defined(CONFIG_IOMMU_DMA)
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
}
/*
@@ -1168,32 +1163,4 @@ static struct platform_driver ipmmu_driver = {
.probe = ipmmu_probe,
.remove = ipmmu_remove,
};
-
-static int __init ipmmu_init(void)
-{
- struct device_node *np;
- static bool setup_done;
- int ret;
-
- if (setup_done)
- return 0;
-
- np = of_find_matching_node(NULL, ipmmu_of_ids);
- if (!np)
- return 0;
-
- of_node_put(np);
-
- ret = platform_driver_register(&ipmmu_driver);
- if (ret < 0)
- return ret;
-
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
-
- setup_done = true;
- return 0;
-}
-subsys_initcall(ipmmu_init);
+builtin_platform_driver(ipmmu_driver);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a24aa804ea3..16179a9a7283 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -792,8 +792,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
goto fail;
}
- bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
-
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 7e363b1f24df..5a4e00e4bbbc 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -138,6 +138,7 @@
#define PM_CLK_AO BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
#define PGTABLE_PA_35_EN BIT(17)
+#define TF_PORT_TO_ADDR_MT8173 BIT(18)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -157,6 +158,7 @@
enum mtk_iommu_plat {
M4U_MT2712,
M4U_MT6779,
+ M4U_MT6795,
M4U_MT8167,
M4U_MT8173,
M4U_MT8183,
@@ -955,7 +957,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int ban
 * Global control settings are in bank0. These global registers may be
 * re-initialized, since we cannot be sure there are no bank0 consumers.
*/
- if (data->plat_data->m4u_plat == M4U_MT8173) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
} else {
@@ -1243,30 +1245,13 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->hw_list = &data->hw_list_head;
}
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
- if (ret)
- goto out_list_del;
- }
-
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
if (ret)
- goto out_bus_set_null;
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- ret = bus_set_iommu(&pci_bus_type, &mtk_iommu_ops);
- if (ret) /* PCIe fail don't affect platform_bus. */
- goto out_list_del;
- }
-#endif
+ goto out_list_del;
}
return ret;
-out_bus_set_null:
- bus_set_iommu(&platform_bus_type, NULL);
out_list_del:
list_del(&data->list);
iommu_device_unregister(&data->iommu);
@@ -1294,11 +1279,6 @@ static int mtk_iommu_remove(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
device_link_remove(data->smicomm_dev, &pdev->dev);
component_master_del(&pdev->dev, &mtk_iommu_com_ops);
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
}
pm_runtime_disable(&pdev->dev);
for (i = 0; i < data->plat_data->banks_num; i++) {
@@ -1413,6 +1393,19 @@ static const struct mtk_iommu_plat_data mt6779_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
+static const struct mtk_iommu_plat_data mt6795_data = {
+ .m4u_plat = M4U_MT6795,
+ .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
+ HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+ TF_PORT_TO_ADDR_MT8173,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
+};
+
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1427,7 +1420,8 @@ static const struct mtk_iommu_plat_data mt8167_data = {
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
- HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
+ HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+ TF_PORT_TO_ADDR_MT8173,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.banks_num = 1,
.banks_enable = {true},
@@ -1524,6 +1518,7 @@ static const struct mtk_iommu_plat_data mt8195_data_vpp = {
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
+ { .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 128c7a3f1778..6e0e65831eb7 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -691,19 +691,11 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
if (ret)
goto out_sysfs_remove;
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
- if (ret)
- goto out_dev_unreg;
- }
-
ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
if (ret)
- goto out_bus_set_null;
+ goto out_dev_unreg;
return ret;
-out_bus_set_null:
- bus_set_iommu(&platform_bus_type, NULL);
out_dev_unreg:
iommu_device_unregister(&data->iommu);
out_sysfs_remove:
@@ -718,9 +710,6 @@ static int mtk_iommu_v1_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
- if (iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, NULL);
-
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index a99afb5d9011..259f65291d90 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -32,12 +32,12 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
+ if (len < maxcol) \
+ goto out; \
+ bytes = scnprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
- if (len < maxcol) \
- goto out; \
} while (0)
static ssize_t
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d9cf2820c02e..07ee2600113c 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1776,14 +1776,8 @@ static int __init omap_iommu_init(void)
goto fail_driver;
}
- ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
- if (ret)
- goto fail_bus;
-
return 0;
-fail_bus:
- platform_driver_unregister(&omap_iommu_driver);
fail_driver:
kmem_cache_destroy(iopte_cachep);
return ret;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ab57c4b8fade..a3fc59b814ab 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1300,8 +1300,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (!dma_dev)
dma_dev = &pdev->dev;
- bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
-
pm_runtime_enable(dev);
for (i = 0; i < iommu->num_irq; i++) {
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index c898bcbbce11..3c071782f6f1 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -39,7 +39,7 @@ static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
return container_of(dom, struct s390_domain, domain);
}
-static bool s390_iommu_capable(enum iommu_cap cap)
+static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -185,7 +185,12 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
- struct zpci_dev *zdev = to_zpci_dev(dev);
+ struct zpci_dev *zdev;
+
+ if (!dev_is_pci(dev))
+ return ERR_PTR(-ENODEV);
+
+ zdev = to_zpci_dev(dev);
return &zdev->iommu_dev;
}
@@ -385,9 +390,3 @@ static const struct iommu_ops s390_iommu_ops = {
.free = s390_domain_free,
}
};
-
-static int __init s390_iommu_init(void)
-{
- return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
-}
-subsys_initcall(s390_iommu_init);
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 511959c8a14d..fadd2c907222 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -496,9 +496,6 @@ static int sprd_iommu_probe(struct platform_device *pdev)
if (ret)
goto remove_sysfs;
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &sprd_iommu_ops);
-
ret = sprd_iommu_clk_enable(sdev);
if (ret)
goto unregister_iommu;
@@ -534,8 +531,6 @@ static int sprd_iommu_remove(struct platform_device *pdev)
iommu_group_put(sdev->group);
sdev->group = NULL;
- bus_set_iommu(&platform_bus_type, NULL);
-
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&sdev->iommu);
iommu_device_unregister(&sdev->iommu);
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index a84c63518773..cd9b74ee24de 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -965,8 +965,6 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
if (ret < 0)
goto err_unregister;
- bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
-
return 0;
err_unregister:
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 2a8de975fe63..5b1af40221ec 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1083,8 +1083,8 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
/*
* This is a bit of a hack. Ideally we'd want to simply return this
- * value. However the IOMMU registration process will attempt to add
- * all devices to the IOMMU when bus_set_iommu() is called. In order
+ * value. However iommu_device_register() will attempt to add
+ * all devices to the IOMMU before we get that far. In order
* not to rely on global variables to track the IOMMU instance, we
* set it here so that it can be looked up from the .probe_device()
* callback via the IOMMU device's .drvdata field.
@@ -1138,32 +1138,15 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
return ERR_PTR(err);
err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
- if (err)
- goto remove_sysfs;
-
- err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
- if (err < 0)
- goto unregister;
-
-#ifdef CONFIG_PCI
- err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops);
- if (err < 0)
- goto unset_platform_bus;
-#endif
+ if (err) {
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ERR_PTR(err);
+ }
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_smmu_debugfs_init(smmu);
return smmu;
-
-unset_platform_bus: __maybe_unused;
- bus_set_iommu(&platform_bus_type, NULL);
-unregister:
- iommu_device_unregister(&smmu->iommu);
-remove_sysfs:
- iommu_device_sysfs_remove(&smmu->iommu);
-
- return ERR_PTR(err);
}
void tegra_smmu_remove(struct tegra_smmu *smmu)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 80151176ba12..b7c22802f57c 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -7,9 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/amba/bus.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
@@ -17,7 +15,6 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
@@ -25,6 +22,8 @@
#include <uapi/linux/virtio_iommu.h>
+#include "dma-iommu.h"
+
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@@ -925,7 +924,7 @@ static struct virtio_driver virtio_iommu_drv;
static int viommu_match_node(struct device *dev, const void *data)
{
- return dev->parent->fwnode == data;
+ return device_match_fwnode(dev->parent, data);
}
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
@@ -1006,7 +1005,7 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
-static bool viommu_capable(enum iommu_cap cap)
+static bool viommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -1156,26 +1155,6 @@ static int viommu_probe(struct virtio_device *vdev)
iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
-#ifdef CONFIG_PCI
- if (pci_bus_type.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-#endif
-#ifdef CONFIG_ARM_AMBA
- if (amba_bustype.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&amba_bustype, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-#endif
- if (platform_bus_type.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-
vdev->priv = viommu;
dev_info(dev, "input address: %u bits\n",
@@ -1184,9 +1163,6 @@ static int viommu_probe(struct virtio_device *vdev)
return 0;
-err_unregister:
- iommu_device_sysfs_remove(&viommu->iommu);
- iommu_device_unregister(&viommu->iommu);
err_free_vqs:
vdev->config->del_vqs(vdev);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index eb5ea5b69cfa..7ef9f5e696d3 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -3,7 +3,7 @@ menu "IRQ chip support"
config IRQCHIP
def_bool y
- depends on OF_IRQ
+ depends on (OF_IRQ || ACPI_GENERIC_GSI)
config ARM_GIC
bool
@@ -481,6 +481,21 @@ config IMX_INTMUX
help
Support for the i.MX INTMUX interrupt multiplexer.
+config IMX_MU_MSI
+ tristate "i.MX MU used as MSI controller"
+ depends on OF && HAS_IOMEM
+ depends on ARCH_MXC || COMPILE_TEST
+ default m if ARCH_MXC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Provide a driver for the i.MX Messaging Unit block used as a
+ CPU-to-CPU MSI controller. This requires a specially crafted DT
+ to make use of this driver.
+
+ If unsure, say N.
+
config LS1X_IRQ
bool "Loongson-1 Interrupt Controller"
depends on MACH_LOONGSON32
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b6acbca2248b..87b49a10962c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
+obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index b249d4df899e..6e1ac330d7a6 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -13,7 +13,7 @@
#define pr_fmt(fmt) "GICv2m: " fmt
#include <linux/acpi.h>
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index beead1a0191c..973ede0197e3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -11,9 +11,9 @@
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index a2163d32f17d..e1efdec9e9ac 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) "GICv3: " fmt
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 262658fd5f9e..34d58567b78d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -978,7 +978,7 @@ static int __gic_update_rdist_properties(struct redist_region *region,
u64 typer = gic_read_typer(ptr + GICR_TYPER);
u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
- /* Boot-time cleanip */
+ /* Boot-time cleanup */
if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
u64 val;
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
new file mode 100644
index 000000000000..229039eda1b1
--- /dev/null
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Freescale MU used as MSI controller
+ *
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ * Copyright 2022 NXP
+ * Frank Li <Frank.Li@nxp.com>
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Based on drivers/mailbox/imx-mailbox.c
+ */
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/spinlock.h>
+
+#define IMX_MU_CHANS 4
+
+enum imx_mu_xcr {
+ IMX_MU_GIER,
+ IMX_MU_GCR,
+ IMX_MU_TCR,
+ IMX_MU_RCR,
+ IMX_MU_xCR_MAX,
+};
+
+enum imx_mu_xsr {
+ IMX_MU_SR,
+ IMX_MU_GSR,
+ IMX_MU_TSR,
+ IMX_MU_RSR,
+ IMX_MU_xSR_MAX
+};
+
+enum imx_mu_type {
+ IMX_MU_V2 = BIT(1),
+};
+
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
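
The V1 layout is easy to misread: receive channel n lives at bit 24 + (3 - n),
so channel 0 is bit 27 and channel 3 is bit 24, while V2 simply uses bit n.
A standalone userspace check of the expressions above (no hardware assumed):

#include <assert.h>
#include <stdint.h>

static uint32_t rie_bit(int v2, unsigned int chan)
{
	return v2 ? (1u << chan) : (1u << (24 + (3 - chan)));
}

int main(void)
{
	assert(rie_bit(0, 0) == (1u << 27));	/* V1: chan 0 -> bit 27 */
	assert(rie_bit(0, 3) == (1u << 24));	/* V1: chan 3 -> bit 24 */
	assert(rie_bit(1, 2) == (1u << 2));	/* V2: chan 2 -> bit 2  */
	return 0;
}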
+
+struct imx_mu_dcfg {
+ enum imx_mu_type type;
+ u32 xTR; /* Transmit Register0 */
+ u32 xRR; /* Receive Register0 */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
+};
+
+struct imx_mu_msi {
+ raw_spinlock_t lock;
+ struct irq_domain *msi_domain;
+ void __iomem *regs;
+ phys_addr_t msiir_addr;
+ const struct imx_mu_dcfg *cfg;
+ unsigned long used;
+ struct clk *clk;
+};
+
+static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs)
+{
+ iowrite32(val, msi_data->regs + offs);
+}
+
+static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs)
+{
+ return ioread32(msi_data->regs + offs);
+}
+
+static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]);
+ val &= ~clr;
+ val |= set;
+ imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]);
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+
+ return val;
+}
+
+static void imx_mu_msi_parent_mask_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq));
+}
+
+static void imx_mu_msi_parent_unmask_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0);
+}
+
+static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
+}
+
+static struct irq_chip imx_mu_msi_irq_chip = {
+ .name = "MU-MSI",
+ .irq_ack = irq_chip_ack_parent,
+};
+
+static struct msi_domain_ops imx_mu_msi_irq_ops = {
+};
+
+static struct msi_domain_info imx_mu_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &imx_mu_msi_irq_ops,
+ .chip = &imx_mu_msi_irq_chip,
+};
+
+static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+ u64 addr = msi_data->msiir_addr + 4 * data->hwirq;
+
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
+ msg->data = data->hwirq;
+}
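
Each channel thus gets its own doorbell: the MSI address is that channel's
transmit register on the B side and the payload is the channel number. With an
assumed msiir_addr of 0x5d1c0200 (invented for illustration), hwirq 2 composes
to address 0x5d1c0208, data 2:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t msiir_addr = 0x5d1c0200ULL;	/* assumed base, see above */
	unsigned int hwirq = 2;
	uint64_t addr = msiir_addr + 4 * hwirq;

	printf("MSI addr=0x%llx data=%u\n", (unsigned long long)addr, hwirq);
	return 0;
}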
+
+static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip imx_mu_msi_parent_chip = {
+ .name = "MU",
+ .irq_mask = imx_mu_msi_parent_mask_irq,
+ .irq_unmask = imx_mu_msi_parent_unmask_irq,
+ .irq_ack = imx_mu_msi_parent_ack_irq,
+ .irq_compose_msi_msg = imx_mu_msi_parent_compose_msg,
+ .irq_set_affinity = imx_mu_msi_parent_set_affinity,
+};
+
+static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct imx_mu_msi *msi_data = domain->host_data;
+ unsigned long flags;
+ int pos, err = 0;
+
+ WARN_ON(nr_irqs != 1);
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS);
+ if (pos < IMX_MU_CHANS)
+ __set_bit(pos, &msi_data->used);
+ else
+ err = -ENOSPC;
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, pos,
+ &imx_mu_msi_parent_chip, msi_data,
+ handle_edge_irq, NULL, NULL);
+ return 0;
+}
+
+static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ __clear_bit(d->hwirq, &msi_data->used);
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+}
+
+static const struct irq_domain_ops imx_mu_msi_domain_ops = {
+ .alloc = imx_mu_msi_domain_irq_alloc,
+ .free = imx_mu_msi_domain_irq_free,
+};
+
+static void imx_mu_msi_irq_handler(struct irq_desc *desc)
+{
+ struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+ int i;
+
+ status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]);
+
+ chained_irq_enter(chip, desc);
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ if (status & IMX_MU_xSR_RFn(msi_data, i))
+ generic_handle_domain_irq(msi_data->msi_domain, i);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
+{
+ struct fwnode_handle *fwnodes = dev_fwnode(dev);
+ struct irq_domain *parent;
+
+ /* Initialize MSI domain parent */
+ parent = irq_domain_create_linear(fwnodes,
+ IMX_MU_CHANS,
+ &imx_mu_msi_domain_ops,
+ msi_data);
+ if (!parent) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes,
+ &imx_mu_msi_domain_info,
+ parent);
+
+ if (!msi_data->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ irq_domain_set_pm_device(msi_data->msi_domain, dev);
+
+ return 0;
+}
+
+/* Register offset of different version MU IP */
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .type = 0,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {
+ [IMX_MU_SR] = 0x20,
+ [IMX_MU_GSR] = 0x20,
+ [IMX_MU_TSR] = 0x20,
+ [IMX_MU_RSR] = 0x20,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x24,
+ [IMX_MU_GCR] = 0x24,
+ [IMX_MU_TCR] = 0x24,
+ [IMX_MU_RCR] = 0x24,
+ },
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .type = 0,
+ .xTR = 0x20,
+ .xRR = 0x40,
+ .xSR = {
+ [IMX_MU_SR] = 0x60,
+ [IMX_MU_GSR] = 0x60,
+ [IMX_MU_TSR] = 0x60,
+ [IMX_MU_RSR] = 0x60,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x64,
+ [IMX_MU_GCR] = 0x64,
+ [IMX_MU_TCR] = 0x64,
+ [IMX_MU_RCR] = 0x64,
+ },
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
+ .type = IMX_MU_V2,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {
+ [IMX_MU_SR] = 0xC,
+ [IMX_MU_GSR] = 0x118,
+ [IMX_MU_TSR] = 0x124,
+ [IMX_MU_RSR] = 0x12C,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x110,
+ [IMX_MU_GCR] = 0x114,
+ [IMX_MU_TCR] = 0x120,
+ [IMX_MU_RCR] = 0x128,
+ },
+};
+
+static int __init imx_mu_of_init(struct device_node *dn,
+ struct device_node *parent,
+ const struct imx_mu_dcfg *cfg)
+{
+ struct platform_device *pdev = of_find_device_by_node(dn);
+ struct device_link *pd_link_a;
+ struct device_link *pd_link_b;
+ struct imx_mu_msi *msi_data;
+ struct resource *res;
+ struct device *pd_a;
+ struct device *pd_b;
+ struct device *dev;
+ int ret;
+ int irq;
+
+ dev = &pdev->dev;
+
+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ msi_data->cfg = cfg;
+
+ msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side");
+ if (IS_ERR(msi_data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+ return PTR_ERR(msi_data->regs);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side");
+ if (!res)
+ return -EIO;
+
+ msi_data->msiir_addr = res->start + msi_data->cfg->xTR;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, msi_data);
+
+ msi_data->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(msi_data->clk))
+ return PTR_ERR(msi_data->clk);
+
+ pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side");
+ if (IS_ERR(pd_a))
+ return PTR_ERR(pd_a);
+
+ pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side");
+ if (IS_ERR(pd_b))
+ return PTR_ERR(pd_b);
+
+ pd_link_a = device_link_add(dev, pd_a,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+
+ if (!pd_link_a) {
+ dev_err(dev, "Failed to add device_link to mu a.\n");
+ goto err_pd_a;
+ }
+
+ pd_link_b = device_link_add(dev, pd_b,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+
+ if (!pd_link_b) {
+ dev_err(dev, "Failed to add device_link to mu b.\n");
+ goto err_pd_b;
+ }
+
+ ret = imx_mu_msi_domains_init(msi_data, dev);
+ if (ret)
+ goto err_dm_init;
+
+ pm_runtime_enable(dev);
+
+ irq_set_chained_handler_and_data(irq,
+ imx_mu_msi_irq_handler,
+ msi_data);
+
+ return 0;
+
+err_dm_init:
+ device_link_remove(dev, pd_b);
+err_pd_b:
+ device_link_remove(dev, pd_a);
+err_pd_a:
+ return -EINVAL;
+}
+
+static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_msi *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_msi *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
+static int __init imx_mu_imx7ulp_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp);
+}
+
+static int __init imx_mu_imx6sx_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx);
+}
+
+static int __init imx_mu_imx8ulp_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp);
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi)
+IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init)
+IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init)
+IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops)
+
+MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
+MODULE_DESCRIPTION("Freescale MU MSI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
index 853b3972dbe7..d8d48b1f7c29 100644
--- a/drivers/irqchip/irq-ls-extirq.c
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -6,8 +6,7 @@
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -16,13 +15,41 @@
#define LS1021A_SCFGREVCR 0x200
struct ls_extirq_data {
- struct regmap *syscon;
- u32 intpcr;
+ void __iomem *intpcr;
+ raw_spinlock_t lock;
+ bool big_endian;
bool is_ls1021a_or_ls1043a;
u32 nirq;
struct irq_fwspec map[MAXIRQ];
};
+static void ls_extirq_intpcr_rmw(struct ls_extirq_data *priv, u32 mask,
+ u32 value)
+{
+ u32 intpcr;
+
+ /*
+ * Serialize concurrent calls to ls_extirq_set_type() from multiple
+ * IRQ descriptors, making sure the read-modify-write is atomic.
+ */
+ raw_spin_lock(&priv->lock);
+
+ if (priv->big_endian)
+ intpcr = ioread32be(priv->intpcr);
+ else
+ intpcr = ioread32(priv->intpcr);
+
+ intpcr &= ~mask;
+ intpcr |= value;
+
+ if (priv->big_endian)
+ iowrite32be(intpcr, priv->intpcr);
+ else
+ iowrite32(intpcr, priv->intpcr);
+
+ raw_spin_unlock(&priv->lock);
+}
+
static int
ls_extirq_set_type(struct irq_data *data, unsigned int type)
{
@@ -51,7 +78,8 @@ ls_extirq_set_type(struct irq_data *data, unsigned int type)
default:
return -EINVAL;
}
- regmap_update_bits(priv->syscon, priv->intpcr, mask, value);
+
+ ls_extirq_intpcr_rmw(priv, mask, value);
return irq_chip_set_type_parent(data, type);
}
@@ -143,7 +171,6 @@ ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
static int __init
ls_extirq_of_init(struct device_node *node, struct device_node *parent)
{
-
struct irq_domain *domain, *parent_domain;
struct ls_extirq_data *priv;
int ret;
@@ -151,40 +178,52 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("Cannot find parent domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_irq_find_host;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->syscon = syscon_node_to_regmap(node->parent);
- if (IS_ERR(priv->syscon)) {
- ret = PTR_ERR(priv->syscon);
- pr_err("Failed to lookup parent regmap\n");
- goto out;
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_alloc_priv;
}
- ret = of_property_read_u32(node, "reg", &priv->intpcr);
- if (ret) {
- pr_err("Missing INTPCR offset value\n");
- goto out;
+
+ /*
+ * All extirq OF nodes are under a scfg/syscon node with
+ * the 'ranges' property
+ */
+ priv->intpcr = of_iomap(node, 0);
+ if (!priv->intpcr) {
+ pr_err("Cannot ioremap OF node %pOF\n", node);
+ ret = -ENOMEM;
+ goto err_iomap;
}
ret = ls_extirq_parse_map(priv, node);
if (ret)
- goto out;
+ goto err_parse_map;
+ priv->big_endian = of_device_is_big_endian(parent);
priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
of_device_is_compatible(node, "fsl,ls1043a-extirq");
+ raw_spin_lock_init(&priv->lock);
domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
&extirq_domain_ops, priv);
- if (!domain)
+ if (!domain) {
ret = -ENOMEM;
+ goto err_add_hierarchy;
+ }
-out:
- if (ret)
- kfree(priv);
+ return 0;
+
+err_add_hierarchy:
+err_parse_map:
+ iounmap(priv->intpcr);
+err_iomap:
+ kfree(priv);
+err_alloc_priv:
+err_irq_find_host:
return ret;
}
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index b4927e425f7b..527c90e0920e 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
@@ -18,7 +19,6 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
-#include <linux/dma-iommu.h>
#define MSI_IRQS_PER_MSIR 32
#define MSI_MSIR_OFFSET 4
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index 56bf502d9c67..2a349082af81 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -21,11 +21,33 @@
#define RTL_ICTL_IRR2 0x10
#define RTL_ICTL_IRR3 0x14
+#define RTL_ICTL_NUM_INPUTS 32
+
#define REG(x) (realtek_ictl_base + x)
static DEFINE_RAW_SPINLOCK(irq_lock);
static void __iomem *realtek_ictl_base;
+/*
+ * IRR0-IRR3 store 4 bits per interrupt, but Realtek uses inverted numbering,
+ * placing IRQ 31 in the first four bits. A routing value of '0' means the
+ * interrupt is left disconnected. Routing values {1..15} connect to output
+ * lines {0..14}.
+ */
+#define IRR_OFFSET(idx) (4 * (3 - ((idx) * 4) / 32))
+#define IRR_SHIFT(idx) (((idx) * 4) % 32)
+
+static void write_irr(void __iomem *irr0, int idx, u32 value)
+{
+ unsigned int offset = IRR_OFFSET(idx);
+ unsigned int shift = IRR_SHIFT(idx);
+ u32 irr;
+
+ irr = readl(irr0 + offset) & ~(0xf << shift);
+ irr |= (value & 0xf) << shift;
+ writel(irr, irr0 + offset);
+}
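
Plugging indices into the macros: idx 31 yields offset 0 and shift 28 (the top
nibble of IRR0), idx 24 offset 0 and shift 0, and idx 0 offset 12 (IRR3) and
shift 0, matching the inverted numbering described above. A userspace sanity
check of the same expressions:

#include <assert.h>

#define IRR_OFFSET(idx)	(4 * (3 - ((idx) * 4) / 32))
#define IRR_SHIFT(idx)	(((idx) * 4) % 32)

int main(void)
{
	assert(IRR_OFFSET(31) == 0  && IRR_SHIFT(31) == 28);	/* IRR0 */
	assert(IRR_OFFSET(24) == 0  && IRR_SHIFT(24) == 0);	/* IRR0 */
	assert(IRR_OFFSET(0)  == 12 && IRR_SHIFT(0)  == 0);	/* IRR3 */
	return 0;
}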
+
static void realtek_ictl_unmask_irq(struct irq_data *i)
{
unsigned long flags;
@@ -62,8 +84,14 @@ static struct irq_chip realtek_ictl_irq = {
static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
+ unsigned long flags;
+
irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ write_irr(REG(RTL_ICTL_IRR0), hw, 1);
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+
return 0;
}
@@ -95,90 +123,50 @@ out:
chained_irq_exit(chip, desc);
}
-/*
- * SoC interrupts are cascaded to MIPS CPU interrupts according to the
- * interrupt-map in the device tree. Each SoC interrupt gets 4 bits for
- * the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts
- * thus go into 4 IRRs. A routing value of '0' means the interrupt is left
- * disconnected. Routing values {1..15} connect to output lines {0..14}.
- */
-static int __init map_interrupts(struct device_node *node, struct irq_domain *domain)
-{
- struct device_node *cpu_ictl;
- const __be32 *imap;
- u32 imaplen, soc_int, cpu_int, tmp, regs[4];
- int ret, i, irr_regs[] = {
- RTL_ICTL_IRR3,
- RTL_ICTL_IRR2,
- RTL_ICTL_IRR1,
- RTL_ICTL_IRR0,
- };
- u8 mips_irqs_set;
-
- ret = of_property_read_u32(node, "#address-cells", &tmp);
- if (ret || tmp)
- return -EINVAL;
-
- imap = of_get_property(node, "interrupt-map", &imaplen);
- if (!imap || imaplen % 3)
- return -EINVAL;
-
- mips_irqs_set = 0;
- memset(regs, 0, sizeof(regs));
- for (i = 0; i < imaplen; i += 3 * sizeof(u32)) {
- soc_int = be32_to_cpup(imap);
- if (soc_int > 31)
- return -EINVAL;
-
- cpu_ictl = of_find_node_by_phandle(be32_to_cpup(imap + 1));
- if (!cpu_ictl)
- return -EINVAL;
- ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp);
- of_node_put(cpu_ictl);
- if (ret || tmp != 1)
- return -EINVAL;
-
- cpu_int = be32_to_cpup(imap + 2);
- if (cpu_int > 7 || cpu_int < 2)
- return -EINVAL;
-
- if (!(mips_irqs_set & BIT(cpu_int))) {
- irq_set_chained_handler_and_data(cpu_int, realtek_irq_dispatch,
- domain);
- mips_irqs_set |= BIT(cpu_int);
- }
-
- /* Use routing values (1..6) for CPU interrupts (2..7) */
- regs[(soc_int * 4) / 32] |= (cpu_int - 1) << (soc_int * 4) % 32;
- imap += 3;
- }
-
- for (i = 0; i < 4; i++)
- writel(regs[i], REG(irr_regs[i]));
-
- return 0;
-}
-
static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent)
{
+ struct of_phandle_args oirq;
struct irq_domain *domain;
- int ret;
+ unsigned int soc_irq;
+ int parent_irq;
realtek_ictl_base = of_iomap(node, 0);
if (!realtek_ictl_base)
return -ENXIO;
- /* Disable all cascaded interrupts */
+ /* Disable all cascaded interrupts and clear routing */
writel(0, REG(RTL_ICTL_GIMR));
+ for (soc_irq = 0; soc_irq < RTL_ICTL_NUM_INPUTS; soc_irq++)
+ write_irr(REG(RTL_ICTL_IRR0), soc_irq, 0);
+
+ if (WARN_ON(!of_irq_count(node))) {
+ /*
+ * If DT contains no parent interrupts, assume MIPS CPU IRQ 2
+ * (HW0) is connected to the first output. This is the case for
+ * all known hardware anyway. "interrupt-map" is deprecated, so
+ * don't bother trying to parse that.
+ */
+ oirq.np = of_find_compatible_node(NULL, NULL, "mti,cpu-interrupt-controller");
+ oirq.args_count = 1;
+ oirq.args[0] = 2;
+
+ parent_irq = irq_create_of_mapping(&oirq);
+
+ of_node_put(oirq.np);
+ } else {
+ parent_irq = of_irq_get(node, 0);
+ }
- domain = irq_domain_add_simple(node, 32, 0,
- &irq_domain_ops, NULL);
+ if (parent_irq < 0)
+ return parent_irq;
+ else if (!parent_irq)
+ return -ENODEV;
- ret = map_interrupts(node, domain);
- if (ret) {
- pr_err("invalid interrupt map\n");
- return ret;
- }
+ domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
+ if (!domain)
+ return -ENOMEM;
+
+ irq_set_chained_handler_and_data(parent_irq, realtek_irq_dispatch, domain);
return 0;
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index af17459c1a5c..e964a8dd8512 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2345,8 +2345,7 @@ HFC_init(void)
static void __exit
HFC_cleanup(void)
{
- if (timer_pending(&hfc_tl))
- del_timer_sync(&hfc_tl);
+ del_timer_sync(&hfc_tl);
pci_unregister_driver(&hfc_driver);
}
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 61fe2ab910b8..b8228ca40454 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -317,19 +317,21 @@ static void do_attach(struct i2c_adapter *adapter)
if (x.running || strncmp(adapter->name, "uni-n", 5))
return;
+ of_node_get(adapter->dev.of_node);
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
if (np) {
of_node_put(np);
} else {
- strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
+ strscpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL);
}
+ of_node_get(adapter->dev.of_node);
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
if (np) {
of_node_put(np);
} else {
- strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
+ strscpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
i2c_new_scanned_device(adapter, &info, scan_adm1030, NULL);
}
}
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 0d51bdf01f43..f6deac85962e 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -445,8 +445,8 @@ static int pt3_fetch_thread(void *data)
pt3_proc_dma(adap);
delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
- set_current_state(TASK_UNINTERRUPTIBLE);
- freezable_schedule_hrtimeout_range(&delay,
+ set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
+ schedule_hrtimeout_range(&delay,
PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 60c829113299..2c64f55cf01f 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -280,22 +280,6 @@ void cxl_handle_fault(struct work_struct *fault_work)
mmput(mm);
}
-static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
-{
- struct mm_struct *mm;
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("cxl_prefault_one unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
-
- cxl_fault_segment(ctx, mm, ea);
-
- mmput(mm);
-}
-
static u64 next_segment(u64 ea, u64 vsid)
{
if (vsid & SLB_VSID_B_1T)
@@ -306,23 +290,16 @@ static u64 next_segment(u64 ea, u64 vsid)
return ea + 1;
}
-static void cxl_prefault_vma(struct cxl_context *ctx)
+static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
{
u64 ea, last_esid = 0;
struct copro_slb slb;
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
int rc;
- struct mm_struct *mm;
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("cxl_prefault_vm unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) {
rc = copro_calculate_slb(mm, ea, &slb);
@@ -337,20 +314,28 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
}
}
mmap_read_unlock(mm);
-
- mmput(mm);
}
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
+ struct mm_struct *mm = get_mem_context(ctx);
+
+ if (mm == NULL) {
+ pr_devel("cxl_prefault unable to get mm %i\n",
+ pid_nr(ctx->pid));
+ return;
+ }
+
switch (ctx->afu->prefault_mode) {
case CXL_PREFAULT_WED:
- cxl_prefault_one(ctx, wed);
+ cxl_fault_segment(ctx, mm, wed);
break;
case CXL_PREFAULT_ALL:
- cxl_prefault_vma(ctx);
+ cxl_prefault_vma(ctx, mm);
break;
default:
break;
}
+
+ mmput(mm);
}
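
The cxl_prefault_vma() rewrite above also converts the mm walk from the old mm->mmap linked list to the VMA iterator that replaced it. A hedged sketch of the replacement idiom, assuming the caller already holds a reference on the mm:

    #include <linux/mm.h>

    static void walk_vmas(struct mm_struct *mm)
    {
            VMA_ITERATOR(vmi, mm, 0);       /* start the walk at address 0 */
            struct vm_area_struct *vma;

            mmap_read_lock(mm);
            for_each_vma(vmi, vma) {
                    /* vma->vm_start .. vma->vm_end is one mapping */
            }
            mmap_read_unlock(mm);
    }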
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ce89611a136e..54cd009aee50 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1140,8 +1140,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
+ unsigned int arg = card->erase_arg;
- mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg);
+ if (mmc_card_broken_sd_discard(card))
+ arg = SD_ERASE_ARG;
+
+ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 99045e138ba4..cfdd1ff40b86 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -73,6 +73,7 @@ struct mmc_fixup {
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_SANDISK_SD 0x3
#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
@@ -258,4 +259,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_HPI;
}
+static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+}
+
#endif
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index be4393988086..29b9497936df 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -100,6 +100,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /*
+ * Some SD cards report discard support while they don't
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_DISCARD),
+
END_FIXUP
};
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 6edbf5c161ab..b970699743e0 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -128,6 +128,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
struct clk *ref_clk = priv->clk;
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
unsigned int new_clock, clkh_shift = 0;
+ unsigned int new_upper_limit;
int i;
/*
@@ -153,13 +154,20 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
* greater than, new_clock. As we can divide by 1 << i for
* any i in [0, 9] we want the input clock to be as close as
* possible, but no greater than, new_clock << i.
+ *
+ * Allow the clock rate to be up to 1/1024 higher than the request to
+ * fix the clk rate jumping to a lower rate due to rounding error (e.g.
+ * RZ/G2L has 3 clk sources: 533.333333 MHz, 400 MHz and 266.666666 MHz.
+ * A request for 533.333333 MHz would select the slower 400 MHz due to
+ * rounding error (533333333 Hz / 4 * 4 = 533333332 Hz < 533333333 Hz)).
*/
for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
freq = clk_round_rate(ref_clk, new_clock << i);
- if (freq > (new_clock << i)) {
+ new_upper_limit = (new_clock << i) + ((new_clock << i) >> 10);
+ if (freq > new_upper_limit) {
/* Too fast; look for a slightly slower option */
freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3);
- if (freq > (new_clock << i))
+ if (freq > new_upper_limit)
continue;
}
@@ -181,6 +189,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
unsigned int new_clock)
{
+ unsigned int clk_margin;
u32 clk = 0, clock;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -194,7 +203,13 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock);
clock = host->mmc->actual_clock / 512;
- for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+ /*
+ * Add a 1/1024 margin to the clock rate so that the clk variable
+ * is not set to 0 because of the margin already applied to
+ * actual_clock in renesas_sdhi_clk_update().
+ */
+ clk_margin = new_clock >> 10;
+ for (clk = 0x80000080; new_clock + clk_margin >= (clock << 1); clk >>= 1)
clock <<= 1;
/* 1/1 clock is option */
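
The 1/1024 margin used in both hunks is roughly 0.1% of the requested rate. A worked instance of the arithmetic from the comment above, values illustrative:

    static unsigned int upper_limit(unsigned int new_clock)
    {
            /*
             * new_clock = 533333333: margin = 533333333 >> 10 = 520833,
             * so rates up to 533854166 Hz are accepted and the rounded
             * 533333332 Hz result stays within the window.
             */
            return new_clock + (new_clock >> 10);
    }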
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 46c55ab4884c..b92a408f138d 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -309,7 +309,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host)
static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host)
{
- return 400000;
+ return 100000;
}
static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 2d2d8260c681..413925bce0ca 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -773,7 +773,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
dev_err(dev, "failed to set clk rate to %luHz: %d\n",
host_clk, err);
- tegra_host->curr_clk_rate = host_clk;
+ tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 15d4a38b1351..9e63b8c43f3e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,6 +76,7 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
+ depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
select CRYPTO
@@ -85,8 +86,6 @@ config WIREGUARD
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select ARM_CRYPTO if ARM
- select ARM64_CRYPTO if ARM64
select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 841da29cef93..f6c0938027ec 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -178,6 +178,8 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
int *actual_len);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 824cab80aa02..e91648ed7386 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -477,7 +477,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
/* This method might sleep. Do not call it in the atomic context
* of URB completions.
*/
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
usb_kill_anchored_urbs(&priv->tx_submitted);
kvaser_usb_reset_tx_urb_contexts(priv);
@@ -729,6 +729,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
+ init_completion(&priv->flush_comp);
priv->can.ctrlmode_supported = 0;
priv->dev = dev;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 6871d474dabf..7b52fda73d82 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1916,7 +1916,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->flush_comp);
+ reinit_completion(&priv->flush_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
priv->channel);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 07f687f29b34..50f2ac8319ff 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -310,6 +310,38 @@ struct kvaser_cmd {
} u;
} __packed;
+#define CMD_SIZE_ANY 0xff
+#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
+
+static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+};
+
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
@@ -397,6 +429,43 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* buffer size >= cmd->len ensured by caller */
+ u8 min_size = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+ min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+ break;
+ case KVASER_USBCAN:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+ min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+ break;
+ }
+
+ if (min_size == CMD_SIZE_ANY)
+ return 0;
+
+ if (min_size) {
+ min_size += CMD_HEADER_LEN;
+ if (cmd->len >= min_size)
+ return 0;
+
+ dev_err_ratelimited(&dev->intf->dev,
+ "Received command %u too short (size %u, needed %u)",
+ cmd->id, cmd->len, min_size);
+ return -EIO;
+ }
+
+ dev_warn_ratelimited(&dev->intf->dev,
+ "Unhandled command (%d, size %d)\n",
+ cmd->id, cmd->len);
+ return -EINVAL;
+}
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *cmd_len,
@@ -502,6 +571,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
end:
kfree(buf);
+ if (err == 0)
+ err = kvaser_usb_leaf_verify_size(dev, cmd);
+
return err;
}
@@ -1133,6 +1205,9 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
+ if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+ return;
+
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1351,9 +1426,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
switch (mode) {
case CAN_MODE_START:
+ kvaser_usb_unlink_tx_urbs(priv);
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
default:
return -EOPNOTSUPP;
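
kvaser_usb_leaf_verify_size() above is a table-driven minimum-length check: each command id maps to the sizeof_field() of the union member it must at least carry, CMD_SIZE_ANY marks events whose size is deliberately not checked, and 0 flags an unknown id. A reduced sketch of the same pattern, with struct my_cmd and the ids purely illustrative:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    #define MY_HDR_LEN      2
    #define SIZE_ANY        0xff

    struct my_cmd {
            u8 len;
            u8 id;
            union {
                    u8 simple[2];
                    u8 cardinfo[30];
            } u;
    };

    static const u8 my_min_sizes[] = {
            [1] = sizeof_field(struct my_cmd, u.simple),
            [2] = sizeof_field(struct my_cmd, u.cardinfo),
            [3] = SIZE_ANY,                 /* ignored event */
    };

    static int my_verify(const struct my_cmd *cmd)
    {
            u8 min = 0;

            if (cmd->id < ARRAY_SIZE(my_min_sizes))
                    min = my_min_sizes[cmd->id];
            if (min == SIZE_ANY)
                    return 0;
            if (!min)
                    return -EINVAL;         /* unknown command */
            return cmd->len >= min + MY_HDR_LEN ? 0 : -EIO;
    }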
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index aaee7c4248e6..1744d623999d 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1169,6 +1169,11 @@ static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
return ret;
}
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
static int adin1110_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -1177,6 +1182,9 @@ static int adin1110_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info = ptr;
int ret = 0;
+ if (!adin1110_port_dev_check(dev))
+ return NOTIFY_DONE;
+
switch (event) {
case NETDEV_CHANGEUPPER:
if (netif_is_bridge_master(info->upper_dev)) {
@@ -1202,11 +1210,6 @@ static void adin1110_disconnect_phy(void *data)
phy_disconnect(data);
}
-static bool adin1110_port_dev_check(const struct net_device *dev)
-{
- return dev->netdev_ops == &adin1110_netdev_ops;
-}
-
static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
{
struct adin1110_priv *priv = port_priv->priv;
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 2e6c5f258a1f..0ddfb5b5d53c 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,8 +17,3 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_tg3.o += -Wno-array-bounds
-endif
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 16b73bb9acc7..5af16e5f9ad0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -484,7 +484,7 @@ struct bcm_rsb {
/* Number of Receive hardware descriptor words */
#define SP_NUM_HW_RX_DESC_WORDS 1024
-#define SP_LT_NUM_HW_RX_DESC_WORDS 256
+#define SP_LT_NUM_HW_RX_DESC_WORDS 512
/* Internal linked-list RAM size */
#define SP_NUM_TX_DESC 1536
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index e6416332ec79..a842e1999122 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -7,7 +7,6 @@
#include <linux/math64.h>
#include <linux/refcount.h>
#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
#include <net/tc_act/tc_gate.h>
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
index 5ba618aed6ad..4a343f853b28 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -1182,8 +1182,10 @@ static int mcs_register_interrupts(struct mcs *mcs)
mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
- if (!mcs->tx_sa_active)
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
goto exit;
+ }
return ret;
exit:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 64f3acd7f67b..9809f551fc2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,7 +133,7 @@ static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
default:
ret = -EINVAL;
goto fail;
- };
+ }
mutex_unlock(&mbox->lock);
@@ -284,7 +284,7 @@ static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
if (!sc_req) {
- return -ENOMEM;
+ ret = -ENOMEM;
goto fail;
}
@@ -594,7 +594,7 @@ static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
if (!req) {
- return -ENOMEM;
+ ret = -ENOMEM;
goto fail;
}
@@ -1653,6 +1653,7 @@ int cn10k_mcs_init(struct otx2_nic *pfvf)
return 0;
fail:
dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 5803d7f9137c..892ca88e0cf4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2810,7 +2810,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_del_mcam_entries;
+ goto err_mcs_free;
}
err = otx2_wq_init(pf);
@@ -2849,6 +2849,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_mcs_free:
+ cn10k_mcs_free(pf);
err_del_mcam_entries:
otx2_mcam_flow_del(pf);
err_ptp_destroy:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
index 6f2b95a5263e..1da9c1bc1ee9 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
@@ -96,6 +96,8 @@ int prestera_mall_replace(struct prestera_flow_block *block,
list_for_each_entry(binding, &block->binding_list, list) {
err = prestera_span_rule_add(binding, port, block->ingress);
+ if (err == -EEXIST)
+ return err;
if (err)
goto rollback;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index 4f65df0ae5e8..aa080dc57ff0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -498,8 +498,8 @@ prestera_nexthop_group_get(struct prestera_switch *sw,
refcount_inc(&nh_grp->refcount);
} else {
nh_grp = __prestera_nexthop_group_create(sw, key);
- if (IS_ERR(nh_grp))
- return ERR_CAST(nh_grp);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
refcount_set(&nh_grp->refcount, 1);
}
@@ -651,7 +651,7 @@ prestera_fib_node_create(struct prestera_switch *sw,
case PRESTERA_FIB_TYPE_UC_NH:
fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
nh_grp_key);
- if (!fib_node->info.nh_grp)
+ if (IS_ERR(fib_node->info.nh_grp))
goto err_nh_grp_get;
grp_id = fib_node->info.nh_grp->grp_id;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index f0e9d6ea88c5..1005182ce3bc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -107,7 +107,7 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
entry = prestera_span_entry_find_by_id(sw->span, span_id);
if (!entry)
- return false;
+ return -ENOENT;
if (!refcount_dec_and_test(&entry->ref_count))
return 0;
@@ -151,6 +151,9 @@ int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
{
int err;
+ if (binding->span_id == PRESTERA_SPAN_INVALID_ID)
+ return -ENOENT;
+
err = prestera_hw_span_unbind(binding->port, ingress);
if (err)
return err;
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index fe66ba8793cf..45ba0970504a 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,8 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_mtk_ppe.o += -Wno-array-bounds
-endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index a53e205f4a89..be74e1403328 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -115,6 +115,7 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
struct mlx5e_flow_meters *flow_meters;
u8 cir_man, cir_exp, cbs_man, cbs_exp;
struct mlx5_aso_wqe *aso_wqe;
+ unsigned long expires;
struct mlx5_aso *aso;
u64 rate, burst;
u8 ds_cnt;
@@ -187,7 +188,12 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
/* With newer FW the wait for the first ASO WQE can exceed 2us, so allow up to 10ms. */
- err = mlx5_aso_poll_cq(aso, true, 10);
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ err = mlx5_aso_poll_cq(aso, true);
+ if (err)
+ usleep_range(2, 10);
+ } while (err && time_is_after_jiffies(expires));
mutex_unlock(&flow_meters->aso_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 5da746da898d..41970067917b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1405,7 +1405,7 @@ static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_mac
MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false, 10);
+ err = mlx5_aso_poll_cq(maso, false);
mutex_unlock(&aso->aso_lock);
return err;
@@ -1430,7 +1430,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false, 10);
+ err = mlx5_aso_poll_cq(maso, false);
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 21e14507ff5c..baa8092f335e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -381,20 +381,12 @@ void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
WRITE_ONCE(doorbell_cseg, NULL);
}
-int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
{
struct mlx5_aso_cq *cq = &aso->cq;
struct mlx5_cqe64 *cqe;
- unsigned long expires;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
-
- expires = jiffies + msecs_to_jiffies(interval_ms);
- while (!cqe && time_is_after_jiffies(expires)) {
- usleep_range(2, 10);
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
- }
-
if (!cqe)
return -ETIMEDOUT;
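
With the interval argument gone, mlx5_aso_poll_cq() is a single non-blocking check, and the one caller that needs to wait (the meter code above) owns the sleep-and-retry loop. The deadline idiom, sketched under the assumption that the aso.h header from this series is in scope:

    #include <linux/delay.h>
    #include <linux/jiffies.h>

    static int poll_with_timeout(struct mlx5_aso *aso, u32 timeout_ms)
    {
            unsigned long expires = jiffies + msecs_to_jiffies(timeout_ms);
            int err;

            do {
                    err = mlx5_aso_poll_cq(aso, true);
                    if (err)
                            usleep_range(2, 10);
            } while (err && time_is_after_jiffies(expires));

            return err;
    }

This keeps the common path free of timing policy; callers that cannot sleep simply call mlx5_aso_poll_cq() once.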
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index d854e01d7fc5..2d40dcf9d42e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -83,7 +83,7 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
u32 obj_id, u32 opc_mode);
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
struct mlx5_wqe_ctrl_seg *doorbell_cseg);
-int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data);
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
void mlx5_aso_destroy(struct mlx5_aso *aso);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 3ab3e4536b99..8593cafa6368 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -373,10 +373,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (ipv6_tun) {
key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
key_size +=
- sizeof(struct nfp_flower_ipv6_udp_tun);
+ sizeof(struct nfp_flower_ipv6_gre_tun);
} else {
key_size +=
- sizeof(struct nfp_flower_ipv4_udp_tun);
+ sizeof(struct nfp_flower_ipv4_gre_tun);
}
if (enc_op.key) {
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 62deed210a95..91f10f746dff 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2896,8 +2896,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), DRV_NAME);
- if (IS_ERR(hpreg_res)) {
- err = PTR_ERR(hpreg_res);
+ if (!hpreg_res) {
+ err = -EBUSY;
dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_clear_quattro;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 3cbe4ec46234..7f86068f3ff6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2476,7 +2476,10 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ if (port->ndev)
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ else
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
attrs.phys.port_number = port->port_id;
attrs.switch_id.id_len = sizeof(resource_size_t);
memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 25b38a374e3c..dd5919ec408b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1051,7 +1051,8 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
-
+ /* completion variable to confirm vf association */
+ struct completion vf_add;
/* Is the current data path through the VF NIC? */
bool data_path_is_vf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f066de0da492..9352dad58996 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1580,6 +1580,10 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+ if (net_device_ctx->vf_alloc)
+ complete(&net_device_ctx->vf_add);
+
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5f08482065ca..89eb4f179a3c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2313,6 +2313,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
}
+ /* Fallback path: match the VF to its synthetic NIC
+ * by MAC address.
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+ netdev_notice(vf_netdev,
+ "falling back to mac addr based matching\n");
+ return ndev;
+ }
+ }
+
netdev_notice(vf_netdev,
"no netdev found for vf serial:%u\n", serial);
return NULL;
@@ -2409,6 +2421,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
+ if (vf_is_up && !net_device_ctx->vf_alloc) {
+ netdev_info(ndev, "Waiting for the VF association from host\n");
+ wait_for_completion(&net_device_ctx->vf_add);
+ }
+
ret = netvsc_switch_datapath(ndev, vf_is_up);
if (ret) {
@@ -2440,6 +2457,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netvsc_vf_setxdp(vf_netdev, NULL);
+ reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2479,6 +2497,7 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
+ init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
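
The new vf_add completion follows the usual lifecycle: initialized at probe, completed when the host reports the VF association, waited on before switching the data path, and re-armed with reinit_completion() when the VF unregisters. A generic sketch of that lifecycle (struct my_ctx and the hooks are illustrative):

    #include <linux/completion.h>

    struct my_ctx {
            bool ready;
            struct completion add;
    };

    static void my_probe(struct my_ctx *c)
    {
            c->ready = false;
            init_completion(&c->add);
    }

    static void my_host_event(struct my_ctx *c)     /* association reported */
    {
            c->ready = true;
            complete(&c->add);
    }

    static void my_switch(struct my_ctx *c)         /* data path change */
    {
            if (!c->ready)
                    wait_for_completion(&c->add);
    }

    static void my_unregister(struct my_ctx *c)
    {
            reinit_completion(&c->add);             /* re-arm for the next VF */
            c->ready = false;
    }

Note the unbounded wait: a wait_for_completion_timeout() variant would bound the stall if the host never sends the association.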
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 713e3354cb2e..8f8f73099de8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1192,7 +1192,7 @@ void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->min_mtu = 0;
+ /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3757e069c486..54a17b576eac 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1838,7 +1838,7 @@ static int ksz886x_cable_test_start(struct phy_device *phydev)
return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
}
-static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
+static __always_inline int ksz886x_cable_test_result_trans(u16 status, u16 mask)
{
switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_NORMAL:
@@ -1854,13 +1854,13 @@ static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
}
}
-static bool ksz886x_cable_test_failed(u16 status, u16 mask)
+static __always_inline bool ksz886x_cable_test_failed(u16 status, u16 mask)
{
return FIELD_GET(mask, status) ==
KSZ8081_LMD_STAT_FAIL;
}
-static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
+static __always_inline bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
{
switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_OPEN:
@@ -1871,7 +1871,8 @@ static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
return false;
}
-static int ksz886x_cable_test_fault_length(struct phy_device *phydev, u16 status, u16 data_mask)
+static __always_inline int ksz886x_cable_test_fault_length(struct phy_device *phydev,
+ u16 status, u16 data_mask)
{
int dt;
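
The __always_inline annotations above are not an optimization: FIELD_GET() asserts at build time that its mask is a compile-time constant, so a helper that receives the mask as a parameter only builds if it is guaranteed to be inlined into callers that pass constants. A minimal sketch, with MY_STAT_MASK illustrative:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define MY_STAT_MASK    GENMASK(10, 8)

    static __always_inline int my_decode(u16 status, u16 mask)
    {
            /* mask must constant-fold here, hence __always_inline */
            return FIELD_GET(mask, status);
    }

    static int my_read(u16 status)
    {
            return my_decode(status, MY_STAT_MASK);
    }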
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 29e3fa86bac3..daac293e8ede 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -257,6 +257,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
phylink_set(modes, 100000baseSR4_Full);
phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
break;
case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
@@ -268,6 +269,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_25GBASE_CR_S:
case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
break;
case SFF8024_ECC_10GBASE_T_SFI:
case SFF8024_ECC_10GBASE_T_SR:
@@ -276,6 +278,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
case SFF8024_ECC_5GBASE_T:
phylink_set(modes, 5000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, interfaces);
break;
case SFF8024_ECC_2_5GBASE_T:
phylink_set(modes, 2500baseT_Full);
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 73d163704068..687dec49c1e1 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -14,6 +14,7 @@ if PSE_CONTROLLER
config PSE_REGULATOR
tristate "Regulator based PSE controller"
+ depends on REGULATOR || COMPILE_TEST
help
This module provides support for simple regulator based Ethernet Power
Sourcing Equipment without automatic classification support. For
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e0e57083d442..7106932c6f88 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -225,6 +225,9 @@ struct virtnet_info {
/* I like... big packets and I cannot lie! */
bool big_packets;
+ /* number of sg entries allocated for big packets */
+ unsigned int big_packets_num_skbfrags;
+
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
@@ -1331,10 +1334,10 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
char *p;
int i, err, offset;
- sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+ sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
- /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
- for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
+ for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
first = get_a_page(rq, gfp);
if (!first) {
if (list)
@@ -1365,7 +1368,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
first, gfp);
if (err < 0)
give_pages(rq, first);
@@ -3682,13 +3685,35 @@ static int virtnet_validate(struct virtio_device *vdev)
return 0;
}
+static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
+{
+ return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO);
+}
+
+static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
+{
+ bool guest_gso = virtnet_check_guest_gso(vi);
+
+ /* If the device can receive ANY guest GSO packets, allocate
+ * buffers of maximum size regardless of mtu; otherwise size
+ * them for the mtu only.
+ */
+ if (mtu > ETH_DATA_LEN || guest_gso) {
+ vi->big_packets = true;
+ vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
+ }
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
- int mtu;
+ int mtu = 0;
/* Find if host supports multiqueue/rss virtio_net device */
max_queue_pairs = 1;
@@ -3776,13 +3801,6 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);
- /* If we can receive ANY GSO packets, we must allocate large ones. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
- vi->big_packets = true;
-
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
@@ -3848,12 +3866,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->mtu = mtu;
dev->max_mtu = mtu;
-
- /* TODO: size buffers correctly in this case. */
- if (dev->mtu > ETH_DATA_LEN)
- vi->big_packets = true;
}
+ virtnet_set_big_packets(vi, mtu);
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
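
virtnet_set_big_packets() above sizes the receive scatter list by need: any guest GSO feature means the device may deliver maximum-size frames, so MAX_SKB_FRAGS page entries are chained, while a big MTU without GSO needs only enough pages to cover the MTU. A worked instance of the non-GSO arithmetic, assuming 4 KiB pages:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    /* mtu = 9000, PAGE_SIZE = 4096: DIV_ROUND_UP(9000, 4096) = 3 pages */
    static unsigned int pages_for_mtu(unsigned int mtu)
    {
            return DIV_ROUND_UP(mtu, PAGE_SIZE);
    }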
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 84d956ad4093..2d1e3fd9b526 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2081,7 +2081,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
enum nl80211_band band;
- u16 *he_mcs_mask;
+ u16 he_mcs_mask[NL80211_HE_NSS_MAX];
u8 max_nss, he_mcs;
u16 he_tx_mcs = 0, v = 0;
int i, he_nss, nss_idx;
@@ -2098,7 +2098,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
return;
band = def.chan->band;
- he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+ memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs,
+ sizeof(he_mcs_mask));
if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
return;
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index cb5414265a9b..58c0ab01771b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -83,7 +83,8 @@ static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
if (!wait || !max || likely(bytes_read) || fail_stats > 110)
break;
- msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
+ if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats)))
+ break;
}
if (wait && !bytes_read && max)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index cc92706b3d16..cbd8053a9e35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -384,6 +384,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
iwl_mvm_txq_from_tid(sta, tid);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -478,6 +479,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index df51b5b1f171..a40636c90ec3 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4973,6 +4973,8 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
}
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+ if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates)
+ goto out;
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
hdr = (void *)skb->data;
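
The added check treats HWSIM_ATTR_RX_RATE as untrusted input: a userspace-supplied index is validated against the band's n_bitrates before it can be used to index the rate table. The general shape of the guard, sketched with an illustrative helper:

    #include <linux/errno.h>
    #include <net/cfg80211.h>

    static int pick_rate(const struct ieee80211_supported_band *band, u32 idx)
    {
            if (idx >= band->n_bitrates)    /* reject out-of-range index */
                    return -EINVAL;
            return band->bitrates[idx].bitrate;
    }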
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 4901aa02b4fb..7378c4d1e156 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -696,10 +696,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
skb_reserve(skb, q->buf_offset);
- if (q == &dev->q_rx[MT_RXQ_MCU]) {
- u32 *rxfce = (u32 *)skb->cb;
- *rxfce = info;
- }
+ *(u32 *)skb->cb = info;
__skb_put(skb, len);
done++;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index d6aae60c440d..2ce1705c0f43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -345,6 +345,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_status = *(u32 *)skb->cb;
bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
u16 hdr_gap;
int phy_idx;
@@ -394,7 +395,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
spin_unlock_bh(&dev->sta_poll_lock);
}
- if ((rxd0 & csum_mask) == csum_mask)
+ if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
@@ -610,14 +612,14 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or when a VLAN header was
+ * detected.
*/
pad_start = 12;
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
pad_start += 4;
-
- if (get_unaligned_be16(skb->data + pad_start) !=
- skb->len - pad_start - 2)
+ else
pad_start = 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index be97dede2634..a4bcc617c1a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -233,6 +233,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u8 remove_pad, amsdu_info;
u8 mode = 0, qos_ctl = 0;
struct mt7915_sta *msta = NULL;
+ u32 csum_status = *(u32 *)skb->cb;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
@@ -288,7 +289,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask)
+ if ((rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
@@ -446,14 +448,14 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or when a VLAN header was
+ * detected.
*/
pad_start = 12;
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
pad_start += 4;
-
- if (get_unaligned_be16(skb->data + pad_start) !=
- skb->len - pad_start - 2)
+ else
pad_start = 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index e4868c492bc0..650ab97ae052 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -230,6 +230,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7921_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
+ u32 csum_status = *(u32 *)skb->cb;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
@@ -290,7 +291,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask)
+ if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e67cc7909bce..6c054850363f 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -60,14 +60,20 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
.skb = skb,
.info = IEEE80211_SKB_CB(skb),
};
+ struct ieee80211_rate_status rs = {};
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
struct mt76_wcid *wcid;
wcid = rcu_dereference(dev->wcid[cb->wcid]);
if (wcid) {
status.sta = wcid_to_sta(wcid);
- status.rates = NULL;
- status.n_rates = 0;
+ if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
+ rs.rate_idx = wcid->rate;
+ status.rates = &rs;
+ status.n_rates = 1;
+ } else {
+ status.n_rates = 0;
+ }
}
hw = mt76_tx_status_get_hw(dev, skb);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ec5219680092..85ca5b4da3cf 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 64
+#define MAX_STRUCT_PAGE_SIZE 128
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0e92ab4b3283..61af072ac98f 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -787,7 +787,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
- * Also make sure size of struct page is less than 64. We
+ * Also make sure the size of struct page is less than 128. We
* want to make sure we use large enough size here so that
* we don't have a dynamic reserve space depending on
* struct page size. But we also want to make sure we notice
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 00f2f81e20fa..0ea7e441e080 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -182,6 +182,7 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
for_each_node(node)
rcu_assign_pointer(head->current_path[node], NULL);
+ kblockd_schedule_work(&head->requeue_work);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5b796efa325b..bcbef6bc5672 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3521,12 +3521,16 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5ad0ab2853a4..6e079abb22ee 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -996,7 +996,7 @@ static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- cancel_work_sync(&ctrl->err_work);
+ flush_work(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->reconnect_work);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 93e2e313fa70..1eed0fc26b3a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2181,7 +2181,7 @@ out_fail:
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
- cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+ flush_work(&to_tcp_ctrl(ctrl)->err_work);
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
}
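
Both NVMe transports make the same substitution in stop_ctrl(): flush_work() waits for a queued error handler to run to completion rather than discarding it, which matters because the handler performs teardown the stop path depends on; cancel_work_sync() could drop a not-yet-started item on the floor. Sketched contrast, err_work illustrative:

    #include <linux/workqueue.h>

    static struct work_struct err_work;

    static void stop_ctrl(void)
    {
            /* Let a pending or running handler finish its cleanup ... */
            flush_work(&err_work);
            /* ... whereas cancel_work_sync(&err_work) would prevent a
             * queued-but-unstarted handler from ever running.
             */
    }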
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 96f0a12e507c..c34ac33b7338 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -579,7 +579,8 @@ u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
}
EXPORT_SYMBOL(of_translate_address);
-static struct device_node *__of_get_dma_parent(const struct device_node *np)
+#ifdef CONFIG_HAS_DMA
+struct device_node *__of_get_dma_parent(const struct device_node *np)
{
struct of_phandle_args args;
int ret, index;
@@ -596,6 +597,7 @@ static struct device_node *__of_get_dma_parent(const struct device_node *np)
return of_node_get(args.np);
}
+#endif
static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 42da760e0f45..d5a5c35eba72 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -561,7 +561,7 @@ EXPORT_SYMBOL(of_device_is_compatible);
* a NULL terminated array of strings. Returns the best match
* score or 0.
*/
-int of_device_compatible_match(struct device_node *device,
+int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
unsigned int tmp, score = 0;
@@ -1229,7 +1229,7 @@ int of_modalias_node(struct device_node *node, char *modalias, int len)
if (!compatible || strlen(compatible) > cplen)
return -ENODEV;
p = strchr(compatible, ',');
- strlcpy(modalias, p ? p + 1 : compatible, len);
+ strscpy(modalias, p ? p + 1 : compatible, len);
return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);
@@ -2089,12 +2089,13 @@ int of_find_last_cache_level(unsigned int cpu)
struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
while (np) {
+ of_node_put(prev);
prev = np;
- of_node_put(np);
np = of_find_next_cache_node(np);
}
of_property_read_u32(prev, "cache-level", &cache_level);
+ of_node_put(prev);
return cache_level;
}
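
The of_find_last_cache_level() fix restores the usual reference discipline for a device-node walk: of_find_next_cache_node() returns a new reference, so the previous cursor is put only after it has served as the search origin, and the final node is put after its property is read. The idiom, sketched:

    #include <linux/of.h>

    static struct device_node *last_cache_node(struct device_node *np)
    {
            struct device_node *prev = NULL;

            while (np) {
                    of_node_put(prev);      /* done with the old cursor */
                    prev = np;
                    np = of_find_next_cache_node(np);       /* takes a new ref */
            }
            return prev;    /* caller must of_node_put() the result */
    }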
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 75b6cbffa755..8cefe5a7d04e 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -116,12 +116,19 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
{
const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
+ struct device_node *bus_np;
u64 dma_start = 0;
u64 mask, end, size = 0;
bool coherent;
int ret;
- ret = of_dma_get_range(np, &map);
+ if (np == dev->of_node)
+ bus_np = __of_get_dma_parent(np);
+ else
+ bus_np = of_node_get(np);
+
+ ret = of_dma_get_range(bus_np, &map);
+ of_node_put(bus_np);
if (ret < 0) {
/*
* For legacy reasons, we have to assume some devices need
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 1c573e7a60bc..7b571a631639 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -828,15 +828,6 @@ uint32_t __init of_get_flat_dt_phandle(unsigned long node)
return fdt_get_phandle(initial_boot_params, node);
}
-struct fdt_scan_status {
- const char *name;
- int namelen;
- int depth;
- int found;
- int (*iterator)(unsigned long node, const char *uname, int depth, void *data);
- void *data;
-};
-
const char * __init of_flat_dt_get_machine_name(void)
{
const char *name;
@@ -936,6 +927,8 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
if (!prop)
return;
end = of_read_number(prop, len/4);
+ if (start > end)
+ return;
__early_init_dt_declare_initrd(start, end);
phys_initrd_start = start;
@@ -1178,7 +1171,7 @@ int __init early_init_dt_scan_chosen(char *cmdline)
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+ strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
@@ -1190,11 +1183,11 @@ int __init early_init_dt_scan_chosen(char *cmdline)
strlcat(cmdline, " ", COMMAND_LINE_SIZE);
strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
- strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
/* No arguments from boot loader, use kernel's cmdline */
if (!((char *)cmdline)[0])
- strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index d22f605fa7ee..2bac44f09554 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -592,6 +592,9 @@ void __init of_irq_init(const struct of_device_id *matches)
ret = desc->irq_init_cb(desc->dev,
desc->interrupt_parent);
if (ret) {
+ pr_err("%s: Failed to init %pOF (%p), parent %p\n",
+ __func__, desc->dev, desc->dev,
+ desc->interrupt_parent);
of_node_clear_flag(desc->dev, OF_POPULATED);
kfree(desc);
continue;
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 9324483397f6..fb6792d381a6 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -155,12 +155,17 @@ struct bus_dma_region;
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
int of_dma_get_range(struct device_node *np,
const struct bus_dma_region **map);
+struct device_node *__of_get_dma_parent(const struct device_node *np);
#else
static inline int of_dma_get_range(struct device_node *np,
const struct bus_dma_region **map)
{
return -ENODEV;
}
+static inline struct device_node *__of_get_dma_parent(const struct device_node *np)
+{
+ return of_get_parent(np);
+}
#endif
void fdt_init_reserved_mem(void);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index f9614552db82..b89ab5d9fea5 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2465,7 +2465,7 @@ static int unittest_i2c_bus_probe(struct platform_device *pdev)
adap = &std->adap;
i2c_set_adapdata(adap, std);
adap->nr = -1;
- strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
adap->class = I2C_CLASS_DEPRECATED;
adap->algo = &unittest_i2c_algo;
adap->dev.parent = dev;
@@ -3465,6 +3465,9 @@ static int __init of_unittest(void)
pr_info("start of unittest - you will see error messages\n");
+ /* Taint the kernel so we know we've run tests. */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
/* adding data for unittest */
if (IS_ENABLED(CONFIG_UML))
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6e5debdbc55b..2616585ca5f8 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -51,6 +51,7 @@ enum imx6_pcie_variants {
IMX7D,
IMX8MQ,
IMX8MM,
+ IMX8MP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
@@ -61,6 +62,7 @@ struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
u32 flags;
int dbi_length;
+ const char *gpr;
};
struct imx6_pcie {
@@ -150,7 +152,8 @@ struct imx6_pcie {
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MM);
+ imx6_pcie->drvdata->variant != IMX8MM &&
+ imx6_pcie->drvdata->variant != IMX8MP);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
@@ -301,6 +304,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
case IMX8MM:
+ case IMX8MP:
/*
* The PHY initialization had been done in the PHY
* driver, break here directly.
@@ -558,6 +562,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX8MM:
case IMX8MQ:
+ case IMX8MP:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
dev_err(dev, "unable to enable pcie_aux clock\n");
@@ -602,6 +607,7 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX8MM:
case IMX8MQ:
+ case IMX8MP:
clk_disable_unprepare(imx6_pcie->pcie_aux);
break;
default:
@@ -669,6 +675,7 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
reset_control_assert(imx6_pcie->pciephy_reset);
fallthrough;
case IMX8MM:
+ case IMX8MP:
reset_control_assert(imx6_pcie->apps_reset);
break;
case IMX6SX:
@@ -744,6 +751,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
case IMX6Q: /* Nothing to do */
case IMX8MM:
+ case IMX8MP:
break;
}
@@ -793,6 +801,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
case IMX7D:
case IMX8MQ:
case IMX8MM:
+ case IMX8MP:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@@ -812,6 +821,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
case IMX7D:
case IMX8MQ:
case IMX8MM:
+ case IMX8MP:
reset_control_assert(imx6_pcie->apps_reset);
break;
}
@@ -935,7 +945,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_init(imx6_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
@@ -949,7 +959,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ ret = phy_power_on(imx6_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
goto err_phy_off;
@@ -961,7 +971,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
err_phy_off:
if (imx6_pcie->phy)
- phy_power_off(imx6_pcie->phy);
+ phy_exit(imx6_pcie->phy);
err_clk_disable:
imx6_pcie_clk_disable(imx6_pcie);
err_reg_disable:
@@ -1179,6 +1189,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
break;
case IMX8MM:
+ case IMX8MP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@@ -1216,7 +1227,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
@@ -1295,12 +1306,14 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
.dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1308,17 +1321,26 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX7D] = {
.variant = IMX7D,
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx7d-iomuxc-gpr",
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
},
[IMX8MM] = {
.variant = IMX8MM,
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP] = {
+ .variant = IMX8MP,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
},
};
@@ -1329,6 +1351,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
{},
};
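
A minimal sketch of how a probe path consumes the per-variant .gpr compatible added above; imx6_pcie_probe() does the equivalent through the match table's .data pointer (function and variable names here are illustrative only):

	static int example_probe(struct platform_device *pdev)
	{
		/* .data of the matching imx6_pcie_of_match[] entry, i.e. one drvdata[] slot */
		const struct imx6_pcie_drvdata *dd = of_device_get_match_data(&pdev->dev);
		struct regmap *gpr;

		/* the formerly hard-coded "fsl,imx6q-iomuxc-gpr" lookup is now per-variant */
		gpr = syscon_regmap_lookup_by_compatible(dd->gpr);
		if (IS_ERR(gpr))
			return PTR_ERR(gpr);

		return 0;
	}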
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 7746f94a715f..39f3b37d4033 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -267,15 +267,6 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
-
- if (pp->msi_data) {
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
-
- dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
- if (pp->msi_page)
- __free_page(pp->msi_page);
- }
}
static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
@@ -336,6 +327,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
+ u64 *msi_vaddr;
int ret;
u32 ctrl, num_ctrls;
@@ -375,22 +367,16 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
dw_chained_msi_isr, pp);
}
- ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
- pp->msi_page = alloc_page(GFP_DMA32);
- pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- ret = dma_mapping_error(dev, pp->msi_data);
- if (ret) {
- dev_err(pci->dev, "Failed to map MSI data\n");
- __free_page(pp->msi_page);
- pp->msi_page = NULL;
- pp->msi_data = 0;
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to alloc and map MSI data\n");
dw_pcie_free_msi(pp);
-
- return ret;
+ return -ENOMEM;
}
return 0;
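
For context, the managed-coherent pattern this hunk switches to, as a minimal sketch (the helper name is hypothetical; the allocation is just a 64-bit write target for endpoint MSI transactions, so no streaming mapping is required):

	static int example_alloc_msi_target(struct device *dev, dma_addr_t *msi_data)
	{
		u64 *vaddr;

		/* devres-managed: freed automatically on driver detach, which is
		 * why the dma_unmap_page()/__free_page() teardown above can go */
		vaddr = dmam_alloc_coherent(dev, sizeof(u64), msi_data, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		return 0;
	}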
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 09b887093a84..a871ae7eb59e 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -243,7 +243,6 @@ struct dw_pcie_rp {
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
dma_addr_t msi_data;
- struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 7f67aad71df4..d09507f822a7 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
@@ -366,12 +367,11 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
char name[32];
int ret, i;
/* This is an optional property */
- ret = of_gpio_named_count(np, "hisilicon,clken-gpios");
+ ret = gpiod_count(dev, "hisilicon,clken");
if (ret < 0)
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index ec99116ad05c..6d0d1b759ca2 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/syscon.h>
@@ -26,6 +27,7 @@
#define PARF_SYS_CTRL 0x00
#define PARF_DB_CTRL 0x10
#define PARF_PM_CTRL 0x20
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_MHI_BASE_ADDR_LOWER 0x178
#define PARF_MHI_BASE_ADDR_UPPER 0x17c
#define PARF_DEBUG_INT_EN 0x190
@@ -45,6 +47,11 @@
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -83,6 +90,9 @@
#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+/* PARF_MHI_CLOCK_RESET_CTRL fields */
+#define PARF_MSTR_AXI_CLK_EN BIT(1)
+
/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
@@ -95,6 +105,7 @@
/* PARF_SYS_CTRL register fields */
#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10)
#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
/* PARF_DB_CTRL register fields */
@@ -130,21 +141,33 @@ enum qcom_pcie_ep_link_status {
QCOM_PCIE_EP_LINK_DOWN,
};
-static struct clk_bulk_data qcom_pcie_ep_clks[] = {
- { .id = "cfg" },
- { .id = "aux" },
- { .id = "bus_master" },
- { .id = "bus_slave" },
- { .id = "ref" },
- { .id = "sleep" },
- { .id = "slave_q2a" },
-};
-
+/**
+ * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
+ * @pci: Designware PCIe controller struct
+ * @parf: Qualcomm PCIe specific PARF register base
+ * @elbi: Designware PCIe specific ELBI register base
+ * @mmio: MMIO register base
+ * @perst_map: PERST regmap
+ * @mmio_res: MMIO region resource
+ * @core_reset: PCIe Endpoint core reset
+ * @reset: PERST# GPIO
+ * @wake: WAKE# GPIO
+ * @phy: PHY controller block
+ * @debugfs: PCIe Endpoint Debugfs directory
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count
+ * @perst_en: Flag for PERST enable
+ * @perst_sep_en: Flag for PERST separation enable
+ * @link_status: PCIe Link status
+ * @global_irq: Qualcomm PCIe specific Global IRQ
+ * @perst_irq: PERST# IRQ
+ */
struct qcom_pcie_ep {
struct dw_pcie pci;
void __iomem *parf;
void __iomem *elbi;
+ void __iomem *mmio;
struct regmap *perst_map;
struct resource *mmio_res;
@@ -152,6 +175,10 @@ struct qcom_pcie_ep {
struct gpio_desc *reset;
struct gpio_desc *wake;
struct phy *phy;
+ struct dentry *debugfs;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
u32 perst_en;
u32 perst_sep_en;
@@ -193,8 +220,10 @@ static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
*/
static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
{
- regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
- regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ if (pcie_ep->perst_map) {
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ }
}
static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
@@ -227,8 +256,7 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
int ret;
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
if (ret)
return ret;
@@ -249,8 +277,7 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
err_phy_exit:
phy_exit(pcie_ep->phy);
err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
return ret;
}
@@ -259,8 +286,7 @@ static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
{
phy_power_off(pcie_ep->phy);
phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
}
static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
@@ -318,8 +344,14 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
val &= ~PARF_Q2A_FLUSH_EN;
writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
- /* Disable DBI Wakeup, core clock CGC and enable AUX power */
+ /*
+ * Disable Master AXI clock during idle. Do not allow DBI access
+ * to take the core out of L1. Disable core clock gating that
+ * gates PIPE clock from propagating to core clock. Report to the
+ * host that Vaux is present.
+ */
val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS;
val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
PARF_SYS_CTRL_AUX_PWR_DET;
@@ -375,6 +407,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+ /* Gate Master AXI clock to MHI bus during L1SS */
+ val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val &= ~PARF_MSTR_AXI_CLK_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
/* Enable LTSSM */
@@ -437,11 +474,19 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mmio");
+ if (!pcie_ep->mmio_res) {
+ dev_err(dev, "Failed to get mmio resource\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res);
+ if (IS_ERR(pcie_ep->mmio))
+ return PTR_ERR(pcie_ep->mmio);
syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
if (!syscon) {
- dev_err(dev, "Failed to parse qcom,perst-regs\n");
- return -EINVAL;
+ dev_dbg(dev, "PERST separation not available\n");
+ return 0;
}
pcie_ep->perst_map = syscon_node_to_regmap(syscon);
@@ -474,14 +519,15 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
if (ret) {
- dev_err(&pdev->dev, "Failed to get io resources %d\n", ret);
+ dev_err(dev, "Failed to get io resources %d\n", ret);
return ret;
}
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
- if (ret)
- return ret;
+ pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks);
+ if (pcie_ep->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return pcie_ep->num_clks;
+ }
pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
if (IS_ERR(pcie_ep->core_reset))
@@ -495,7 +541,7 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
if (IS_ERR(pcie_ep->wake))
return PTR_ERR(pcie_ep->wake);
- pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy");
+ pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
if (IS_ERR(pcie_ep->phy))
ret = PTR_ERR(pcie_ep->phy);
@@ -571,13 +617,13 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
- int irq, ret;
+ int ret;
- irq = platform_get_irq_byname(pdev, "global");
- if (irq < 0)
- return irq;
+ pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
+ if (pcie_ep->global_irq < 0)
+ return pcie_ep->global_irq;
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
"global_irq", pcie_ep);
@@ -594,7 +640,7 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
"perst_irq", pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
- disable_irq(irq);
+ disable_irq(pcie_ep->global_irq);
return ret;
}
@@ -617,6 +663,37 @@ static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
+static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *)
+ dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+
+ debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs,
+ qcom_pcie_ep_link_transition_count);
+}
+
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
.core_init_notifier = true,
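
One non-obvious detail in the debugfs hunk above: debugfs_create_devm_seqfile() opens the file via single_open() with the struct device as the seq_file's ->private, which is why the show routine recovers its state through dev_get_drvdata(s->private). The same pattern as a minimal sketch (struct example_state is hypothetical):

	struct example_state {
		u32 value;
	};

	static int example_show(struct seq_file *s, void *data)
	{
		/* ->private is the device passed to debugfs_create_devm_seqfile() */
		struct example_state *st = dev_get_drvdata(s->private);

		seq_printf(s, "value: %u\n", st->value);
		return 0;
	}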
@@ -649,6 +726,7 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_pcie_ep *pcie_ep;
+ char *name;
int ret;
pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
@@ -680,8 +758,21 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
goto err_disable_resources;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_disable_irqs;
+ }
+
+ pcie_ep->debugfs = debugfs_create_dir(name, NULL);
+ qcom_pcie_ep_init_debugfs(pcie_ep);
+
return 0;
+err_disable_irqs:
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
err_disable_resources:
qcom_pcie_disable_resources(pcie_ep);
@@ -692,6 +783,11 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
{
struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+ debugfs_remove_recursive(pcie_ep->debugfs);
+
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
return 0;
@@ -702,8 +798,10 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
static const struct of_device_id qcom_pcie_ep_match[] = {
{ .compatible = "qcom,sdx55-pcie-ep", },
+ { .compatible = "qcom,sm8450-pcie-ep", },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
static struct platform_driver qcom_pcie_ep_driver = {
.probe = qcom_pcie_ep_probe,
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 66886dc6e777..f711acacaeaf 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -180,7 +180,7 @@ struct qcom_pcie_resources_2_3_3 {
/* 5 mandatory clocks, plus up to 7 optional platform-specific clocks */
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[9];
+ struct clk_bulk_data clks[12];
int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
@@ -208,17 +208,12 @@ struct qcom_pcie_ops {
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
- void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
};
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
- unsigned int has_tbu_clk:1;
- unsigned int has_ddrss_sf_tbu_clk:1;
- unsigned int has_aggre0_clk:1;
- unsigned int has_aggre1_clk:1;
};
struct qcom_pcie {
@@ -1175,6 +1170,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ unsigned int num_clks, num_opt_clks;
unsigned int idx;
int ret;
@@ -1195,18 +1191,25 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
res->clks[idx++].id = "bus_master";
res->clks[idx++].id = "bus_slave";
res->clks[idx++].id = "slave_q2a";
- if (pcie->cfg->has_tbu_clk)
- res->clks[idx++].id = "tbu";
- if (pcie->cfg->has_ddrss_sf_tbu_clk)
- res->clks[idx++].id = "ddrss_sf_tbu";
- if (pcie->cfg->has_aggre0_clk)
- res->clks[idx++].id = "aggre0";
- if (pcie->cfg->has_aggre1_clk)
- res->clks[idx++].id = "aggre1";
+ num_clks = idx;
+
+ ret = devm_clk_bulk_get(dev, num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->clks[idx++].id = "tbu";
+ res->clks[idx++].id = "ddrss_sf_tbu";
+ res->clks[idx++].id = "aggre0";
+ res->clks[idx++].id = "aggre1";
+ res->clks[idx++].id = "noc_aggr_4";
+ res->clks[idx++].id = "noc_aggr_south_sf";
+ res->clks[idx++].id = "cnoc_qx";
+
+ num_opt_clks = idx - num_clks;
res->num_clks = idx;
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
if (ret < 0)
return ret;
@@ -1509,15 +1512,13 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (pcie->cfg->ops->config_sid) {
ret = pcie->cfg->ops->config_sid(pcie);
if (ret)
- goto err;
+ goto err_assert_reset;
}
return 0;
-err:
+err_assert_reset:
qcom_ep_reset_assert(pcie);
- if (pcie->cfg->ops->post_deinit)
- pcie->cfg->ops->post_deinit(pcie);
err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
@@ -1601,68 +1602,35 @@ static const struct qcom_pcie_ops ops_2_9_0 = {
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
-static const struct qcom_pcie_cfg apq8084_cfg = {
+static const struct qcom_pcie_cfg cfg_1_0_0 = {
.ops = &ops_1_0_0,
};
-static const struct qcom_pcie_cfg ipq8064_cfg = {
+static const struct qcom_pcie_cfg cfg_1_9_0 = {
+ .ops = &ops_1_9_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
-static const struct qcom_pcie_cfg msm8996_cfg = {
+static const struct qcom_pcie_cfg cfg_2_3_2 = {
.ops = &ops_2_3_2,
};
-static const struct qcom_pcie_cfg ipq8074_cfg = {
+static const struct qcom_pcie_cfg cfg_2_3_3 = {
.ops = &ops_2_3_3,
};
-static const struct qcom_pcie_cfg ipq4019_cfg = {
+static const struct qcom_pcie_cfg cfg_2_4_0 = {
.ops = &ops_2_4_0,
};
-static const struct qcom_pcie_cfg sdm845_cfg = {
+static const struct qcom_pcie_cfg cfg_2_7_0 = {
.ops = &ops_2_7_0,
- .has_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8150_cfg = {
- /* sm8150 has qcom IP rev 1.5.0. However 1.5.0 ops are same as
- * 1.9.0, so reuse the same.
- */
- .ops = &ops_1_9_0,
-};
-
-static const struct qcom_pcie_cfg sm8250_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
- .has_ddrss_sf_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
- .ops = &ops_1_9_0,
- .has_ddrss_sf_tbu_clk = true,
- .has_aggre0_clk = true,
- .has_aggre1_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
- .ops = &ops_1_9_0,
- .has_ddrss_sf_tbu_clk = true,
- .has_aggre1_clk = true,
-};
-
-static const struct qcom_pcie_cfg sc7280_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sc8180x_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
};
-static const struct qcom_pcie_cfg ipq6018_cfg = {
+static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
@@ -1761,22 +1729,24 @@ err_pm_runtime_put:
}
static const struct of_device_id qcom_pcie_match[] = {
- { .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg },
- { .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg },
- { .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg },
- { .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
- { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
- { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
- { .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
- { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
- { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
- { .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
- { .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
- { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
- { .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
+ { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
+ { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ }
};
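
The clock rework above relies on two properties of the clk bulk API: devm_clk_bulk_get_optional() fills clks[i].clk with NULL for clocks absent from the device tree instead of failing, and clk_bulk_prepare_enable() treats NULL clocks as no-ops, so one superset table replaces the per-SoC has_*_clk flags. A minimal sketch under those assumptions:

	static int example_get_clks(struct device *dev, struct clk_bulk_data *clks)
	{
		int ret;

		clks[0].id = "tbu";	/* optional on some SoCs */
		clks[1].id = "aggre0";	/* optional on some SoCs */

		/* absent clocks come back as NULL rather than -ENOENT */
		ret = devm_clk_bulk_get_optional(dev, 2, clks);
		if (ret)
			return ret;

		/* NULL entries are silently skipped */
		return clk_bulk_prepare_enable(2, clks);
	}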
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 966c8b48bd96..ba36bbc5897d 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -33,6 +33,7 @@
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_SSDEV_ID_REG 0x2c
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
@@ -1077,7 +1078,10 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
+ bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index 88980a44461d..0cfd9d5a497c 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -103,13 +103,6 @@
#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000
#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000
-/* Defines for PCI configuration command register */
-#define PCI_CONF_ENABLE BIT(31)
-#define PCI_CONF_WHERE(r) ((r) & 0xFC)
-#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16)
-#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11)
-#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8)
-
/**
* struct faraday_pci_variant - encodes IP block differences
* @cascaded_irq: this host has cascaded IRQs from an interrupt controller
@@ -190,11 +183,8 @@ static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
unsigned int fn, int config, int size,
u32 *value)
{
- writel(PCI_CONF_BUS(bus_number) |
- PCI_CONF_DEVICE(PCI_SLOT(fn)) |
- PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
- PCI_CONF_WHERE(config) |
- PCI_CONF_ENABLE,
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
p->base + FTPCI_CONFIG);
*value = readl(p->base + FTPCI_DATA);
@@ -225,11 +215,8 @@ static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
{
int ret = PCIBIOS_SUCCESSFUL;
- writel(PCI_CONF_BUS(bus_number) |
- PCI_CONF_DEVICE(PCI_SLOT(fn)) |
- PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
- PCI_CONF_WHERE(config) |
- PCI_CONF_ENABLE,
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
p->base + FTPCI_CONFIG);
switch (size) {
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index af915c951f06..1ced73726a26 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -523,7 +523,7 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
/* Are the new iobase/iolimit values invalid? */
if (conf->iolimit < conf->iobase ||
- conf->iolimitupper < conf->iobaseupper)
+ le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
@@ -535,10 +535,10 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* is the CPU address.
*/
desired.remap = ((conf->iobase & 0xF0) << 8) |
- (conf->iobaseupper << 16);
+ (le16_to_cpu(conf->iobaseupper) << 16);
desired.base = port->pcie->io.start + desired.remap;
desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
- (conf->iolimitupper << 16)) -
+ (le16_to_cpu(conf->iolimitupper) << 16)) -
desired.remap) +
1;
@@ -552,7 +552,7 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (conf->memlimit < conf->membase)
+ if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
&desired, &port->memwin);
@@ -562,8 +562,8 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- desired.base = ((conf->membase & 0xFFF0) << 16);
- desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
+ desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
@@ -946,6 +946,7 @@ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
bridge->subsystem_vendor_id = ssdev_id & 0xffff;
bridge->subsystem_id = ssdev_id >> 16;
bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CAP_PCIEXP;
bridge->data = port;
bridge->ops = &mvebu_pci_bridge_emul_ops;
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 8e323e93be91..24478ae5a345 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -415,13 +415,6 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
* address (access to which generates correct config transaction) falls in
* this 4 KiB region.
*/
-static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
- unsigned int where)
-{
- return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
- (PCI_FUNC(devfn) << 8) | (where & 0xff);
-}
-
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
@@ -443,7 +436,9 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int offset;
u32 base;
- offset = tegra_pcie_conf_offset(bus->number, devfn, where);
+ offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where) &
+ ~PCI_CONF1_ENABLE;
/* move 4 KiB window to offset within the FPCI region */
base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index a2c3c207a04b..66f37e403a09 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -516,8 +516,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
u32 stat, idx;
int ret, i;
- reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
- GPIOD_OUT_LOW, "PERST#");
+ reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
+ GPIOD_OUT_LOW, "PERST#");
if (IS_ERR(reset))
return PTR_ERR(reset);
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 11cdb9b6f109..b8612ce5f4d0 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -1071,7 +1071,7 @@ static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
.remove = mtk_pcie_remove,
.driver = {
- .name = "mtk-pcie",
+ .name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
},
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 33eb37a2225c..4bd1abf26008 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -30,6 +30,8 @@
#include <linux/reset.h>
#include <linux/sys_soc.h>
+#include "../pci.h"
+
/* MediaTek-specific configuration registers */
#define PCIE_FTS_NUM 0x70c
#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
@@ -120,19 +122,12 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port,
writel_relaxed(val, port->base + reg);
}
-static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot,
- unsigned int func, unsigned int where)
-{
- return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
- (func << 8) | (where & 0xfc) | 0x80000000;
-}
-
static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct mt7621_pcie *pcie = bus->sysdata;
- u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
+ u32 address = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
@@ -147,7 +142,7 @@ static struct pci_ops mt7621_pcie_ops = {
static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
{
- u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
@@ -156,7 +151,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
u32 reg, u32 val)
{
- u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 9037a7827eca..fdd2ec09651e 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -526,7 +526,7 @@ static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
!desc.pci.msi_attrib.is_virtual;
- if (!desc.pci.msi_attrib.can_mask) {
+ if (desc.pci.msi_attrib.can_mask) {
addr = pci_msix_desc_addr(&desc);
desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 4496a7c5c478..88dc66ee1c46 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -649,7 +649,7 @@ struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
if (!closest_pdevs)
return NULL;
- while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ for_each_pci_dev(pdev) {
if (!pci_has_p2pmem(pdev))
continue;
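
for_each_pci_dev() is a thin wrapper around exactly the loop it replaces, including the reference handling (pci_get_device() drops the reference on the device passed in and returns the next one with a reference held):

	/* include/linux/pci.h */
	#define for_each_pci_dev(d) \
		while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)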
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 9c2ca28e3ecf..9334b2dd4764 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -22,11 +22,7 @@
#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
#define PCI_CAP_SSID_SIZEOF (PCI_SSVID_DEVICE_ID + 2)
-#define PCI_CAP_SSID_START PCI_BRIDGE_CONF_END
-#define PCI_CAP_SSID_END (PCI_CAP_SSID_START + PCI_CAP_SSID_SIZEOF)
#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
-#define PCI_CAP_PCIE_START PCI_CAP_SSID_END
-#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
/**
* struct pci_bridge_reg_behavior - register bits behaviors
@@ -324,7 +320,7 @@ pci_bridge_emul_read_ssid(struct pci_bridge_emul *bridge, int reg, u32 *value)
switch (reg) {
case PCI_CAP_LIST_ID:
*value = PCI_CAP_ID_SSVID |
- (bridge->has_pcie ? (PCI_CAP_PCIE_START << 8) : 0);
+ ((bridge->pcie_start > bridge->ssid_start) ? (bridge->pcie_start << 8) : 0);
return PCI_BRIDGE_EMUL_HANDLED;
case PCI_SSVID_VENDOR_ID:
@@ -365,18 +361,33 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
if (!bridge->pci_regs_behavior)
return -ENOMEM;
- if (bridge->subsystem_vendor_id)
- bridge->conf.capabilities_pointer = PCI_CAP_SSID_START;
- else if (bridge->has_pcie)
- bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
- else
- bridge->conf.capabilities_pointer = 0;
+ /* If ssid_start and pcie_start were not specified then choose the lowest possible value. */
+ if (!bridge->ssid_start && !bridge->pcie_start) {
+ if (bridge->subsystem_vendor_id)
+ bridge->ssid_start = PCI_BRIDGE_CONF_END;
+ if (bridge->has_pcie)
+ bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF;
+ } else if (!bridge->ssid_start && bridge->subsystem_vendor_id) {
+ if (bridge->pcie_start - PCI_BRIDGE_CONF_END >= PCI_CAP_SSID_SIZEOF)
+ bridge->ssid_start = PCI_BRIDGE_CONF_END;
+ else
+ bridge->ssid_start = bridge->pcie_start + PCI_CAP_PCIE_SIZEOF;
+ } else if (!bridge->pcie_start && bridge->has_pcie) {
+ if (bridge->ssid_start - PCI_BRIDGE_CONF_END >= PCI_CAP_PCIE_SIZEOF)
+ bridge->pcie_start = PCI_BRIDGE_CONF_END;
+ else
+ bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF;
+ }
+
+ bridge->conf.capabilities_pointer = min(bridge->ssid_start, bridge->pcie_start);
if (bridge->conf.capabilities_pointer)
bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST);
if (bridge->has_pcie) {
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
+ bridge->pcie_conf.next = (bridge->ssid_start > bridge->pcie_start) ?
+ bridge->ssid_start : 0;
bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
@@ -459,15 +470,17 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
read_op = bridge->ops->read_base;
cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
- } else if (reg >= PCI_CAP_SSID_START && reg < PCI_CAP_SSID_END && bridge->subsystem_vendor_id) {
+ } else if (reg >= bridge->ssid_start && reg < bridge->ssid_start + PCI_CAP_SSID_SIZEOF &&
+ bridge->subsystem_vendor_id) {
/* Emulated PCI Bridge Subsystem Vendor ID capability */
- reg -= PCI_CAP_SSID_START;
+ reg -= bridge->ssid_start;
read_op = pci_bridge_emul_read_ssid;
cfgspace = NULL;
behavior = NULL;
- } else if (reg >= PCI_CAP_PCIE_START && reg < PCI_CAP_PCIE_END && bridge->has_pcie) {
+ } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF &&
+ bridge->has_pcie) {
/* Our emulated PCIe capability */
- reg -= PCI_CAP_PCIE_START;
+ reg -= bridge->pcie_start;
read_op = bridge->ops->read_pcie;
cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
@@ -538,9 +551,10 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
write_op = bridge->ops->write_base;
cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
- } else if (reg >= PCI_CAP_PCIE_START && reg < PCI_CAP_PCIE_END && bridge->has_pcie) {
+ } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF &&
+ bridge->has_pcie) {
/* Our emulated PCIe capability */
- reg -= PCI_CAP_PCIE_START;
+ reg -= bridge->pcie_start;
write_op = bridge->ops->write_pcie;
cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index 71392b67471d..2a0e59c7f0d9 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -131,6 +131,8 @@ struct pci_bridge_emul {
struct pci_bridge_reg_behavior *pci_regs_behavior;
struct pci_bridge_reg_behavior *pcie_cap_regs_behavior;
void *data;
+ u8 pcie_start;
+ u8 ssid_start;
bool has_pcie;
u16 subsystem_vendor_id;
u16 subsystem_id;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 49238ddd39ee..107d77f3c846 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -774,6 +774,12 @@ static int pci_pm_suspend(struct device *dev)
pci_dev->skip_bus_pm = false;
+ /*
+ * Disabling PTM allows some systems, e.g., Intel mobile chips
+ * since Coffee Lake, to enter a lower-power PM state.
+ */
+ pci_suspend_ptm(pci_dev);
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
@@ -867,20 +873,15 @@ static int pci_pm_suspend_noirq(struct device *dev)
}
}
- if (pci_dev->skip_bus_pm) {
+ if (!pci_dev->state_saved) {
+ pci_save_state(pci_dev);
+
/*
- * Either the device is a bridge with a child in D0 below it, or
- * the function is running for the second time in a row without
- * going through full resume, which is possible only during
- * suspend-to-idle in a spurious wakeup case. The device should
- * be in D0 at this point, but if it is a bridge, it may be
- * necessary to save its state.
+ * If the device is a bridge with a child in D0 below it,
+ * it needs to stay in D0, so check skip_bus_pm to avoid
+ * putting it into a low-power state in that case.
*/
- if (!pci_dev->state_saved)
- pci_save_state(pci_dev);
- } else if (!pci_dev->state_saved) {
- pci_save_state(pci_dev);
- if (pci_power_manageable(pci_dev))
+ if (!pci_dev->skip_bus_pm && pci_power_manageable(pci_dev))
pci_prepare_to_sleep(pci_dev);
}
@@ -987,6 +988,8 @@ static int pci_pm_resume(struct device *dev)
if (pci_dev->state_saved)
pci_restore_standard_config(pci_dev);
+ pci_resume_ptm(pci_dev);
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -1274,6 +1277,8 @@ static int pci_pm_runtime_suspend(struct device *dev)
pci_power_t prev = pci_dev->current_state;
int error;
+ pci_suspend_ptm(pci_dev);
+
/*
* If pci_dev->driver is not set (unbound), we leave the device in D0,
* but it may go to D3cold when the bridge above it runtime suspends.
@@ -1335,6 +1340,7 @@ static int pci_pm_runtime_resume(struct device *dev)
* D3cold when the bridge above it runtime suspended.
*/
pci_pm_default_resume_early(pci_dev);
+ pci_resume_ptm(pci_dev);
if (!pci_dev->driver)
return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fc804e08e3cb..0a2eeb82cebd 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/aperture.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -1373,6 +1374,112 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+#define pci_dev_resource_resize_attr(n) \
+static ssize_t resource##n##_resize_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ ssize_t ret; \
+ \
+ pci_config_pm_runtime_get(pdev); \
+ \
+ ret = sysfs_emit(buf, "%016llx\n", \
+ (u64)pci_rebar_get_possible_sizes(pdev, n)); \
+ \
+ pci_config_pm_runtime_put(pdev); \
+ \
+ return ret; \
+} \
+ \
+static ssize_t resource##n##_resize_store(struct device *dev, \
+ struct device_attribute *attr,\
+ const char *buf, size_t count)\
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ unsigned long size, flags; \
+ int ret, i; \
+ u16 cmd; \
+ \
+ if (kstrtoul(buf, 0, &size) < 0) \
+ return -EINVAL; \
+ \
+ device_lock(dev); \
+ if (dev->driver) { \
+ ret = -EBUSY; \
+ goto unlock; \
+ } \
+ \
+ pci_config_pm_runtime_get(pdev); \
+ \
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
+ ret = aperture_remove_conflicting_pci_devices(pdev, \
+ "resourceN_resize"); \
+ if (ret) \
+ goto pm_put; \
+ } \
+ \
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
+ pci_write_config_word(pdev, PCI_COMMAND, \
+ cmd & ~PCI_COMMAND_MEMORY); \
+ \
+ flags = pci_resource_flags(pdev, n); \
+ \
+ pci_remove_resource_files(pdev); \
+ \
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
+ if (pci_resource_len(pdev, i) && \
+ pci_resource_flags(pdev, i) == flags) \
+ pci_release_resource(pdev, i); \
+ } \
+ \
+ ret = pci_resize_resource(pdev, n, size); \
+ \
+ pci_assign_unassigned_bus_resources(pdev->bus); \
+ \
+ if (pci_create_resource_files(pdev)) \
+ pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
+ \
+ pci_write_config_word(pdev, PCI_COMMAND, cmd); \
+pm_put: \
+ pci_config_pm_runtime_put(pdev); \
+unlock: \
+ device_unlock(dev); \
+ \
+ return ret ? ret : count; \
+} \
+static DEVICE_ATTR_RW(resource##n##_resize)
+
+pci_dev_resource_resize_attr(0);
+pci_dev_resource_resize_attr(1);
+pci_dev_resource_resize_attr(2);
+pci_dev_resource_resize_attr(3);
+pci_dev_resource_resize_attr(4);
+pci_dev_resource_resize_attr(5);
+
+static struct attribute *resource_resize_attrs[] = {
+ &dev_attr_resource0_resize.attr,
+ &dev_attr_resource1_resize.attr,
+ &dev_attr_resource2_resize.attr,
+ &dev_attr_resource3_resize.attr,
+ &dev_attr_resource4_resize.attr,
+ &dev_attr_resource5_resize.attr,
+ NULL,
+};
+
+static umode_t resource_resize_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
+}
+
+static const struct attribute_group pci_dev_resource_resize_group = {
+ .attrs = resource_resize_attrs,
+ .is_visible = resource_resize_is_visible,
+};
+
int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
if (!sysfs_initialized)
@@ -1494,6 +1601,7 @@ const struct attribute_group *pci_dev_groups[] = {
#ifdef CONFIG_ACPI
&pci_dev_acpi_attr_group,
#endif
+ &pci_dev_resource_resize_group,
NULL,
};
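
Usage note for the new attributes, assuming the resizable-BAR encoding used by pci_rebar_get_possible_sizes() and pci_resize_resource(), where bit/value n means 2^n MiB:

	/*
	 * Hand-worked example (illustrative values, not from the patch):
	 *
	 *   # cat resource0_resize
	 *   00000000000001c0	bits 6..8 set: 64, 128 and 256 MiB supported
	 *   # echo 8 > resource0_resize	request a 2^8 MiB = 256 MiB BAR 0
	 *
	 * The store path above then disables memory decoding, releases all
	 * same-type BARs, calls pci_resize_resource(pdev, 0, 8) and lets
	 * pci_assign_unassigned_bus_resources() reassign the bus.
	 */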
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 95bc329e74c0..2127aba3550b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -66,13 +66,15 @@ struct pci_pme_device {
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
- unsigned int delay = dev->d3hot_delay;
-
- if (delay < pci_pm_d3hot_delay)
- delay = pci_pm_d3hot_delay;
-
- if (delay)
- msleep(delay);
+ unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
+ unsigned int upper;
+
+ if (delay_ms) {
+ /* Use a 20% upper bound, 1ms minimum */
+ upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
+ usleep_range(delay_ms * USEC_PER_MSEC,
+ (delay_ms + upper) * USEC_PER_MSEC);
+ }
}
bool pci_reset_supported(struct pci_dev *dev)
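
A hand-worked instance of the new sleep bounds:

	/*
	 * d3hot_delay = 10 ms: upper = max(DIV_ROUND_CLOSEST(10, 5), 1) = 2,
	 * so the old msleep(10) becomes usleep_range(10000, 12000). The
	 * hrtimer-based usleep_range() avoids msleep()'s up-to-one-jiffy
	 * overshoot on short delays, while the 20% slack still allows
	 * timer coalescing.
	 */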
@@ -1663,6 +1665,7 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
+ pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1769,6 +1772,7 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
+ pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -2706,24 +2710,12 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /*
- * There are systems (for example, Intel mobile chips since Coffee
- * Lake) where the power drawn while suspended can be significantly
- * reduced by disabling PTM on PCIe root ports as this allows the
- * port to enter a lower-power PM state and the SoC to reach a
- * lower-power idle state as a whole.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- pci_disable_ptm(dev);
-
pci_enable_wake(dev, target_state, wakeup);
error = pci_set_power_state(dev, target_state);
- if (error) {
+ if (error)
pci_enable_wake(dev, target_state, false);
- pci_restore_ptm_state(dev);
- }
return error;
}
@@ -2764,24 +2756,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /*
- * There are systems (for example, Intel mobile chips since Coffee
- * Lake) where the power drawn while suspended can be significantly
- * reduced by disabling PTM on PCIe root ports as this allows the
- * port to enter a lower-power PM state and the SoC to reach a
- * lower-power idle state as a whole.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- pci_disable_ptm(dev);
-
__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
- if (error) {
+ if (error)
pci_enable_wake(dev, target_state, false);
- pci_restore_ptm_state(dev);
- }
return error;
}
@@ -3485,6 +3465,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
+ error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
+ 2 * sizeof(u32));
+ if (error)
+ pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
+
pci_allocate_vc_save_buffers(dev);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 785f31086313..b1ebb7ab8805 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -505,13 +505,17 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
#ifdef CONFIG_PCIE_PTM
+void pci_ptm_init(struct pci_dev *dev);
void pci_save_ptm_state(struct pci_dev *dev);
void pci_restore_ptm_state(struct pci_dev *dev);
-void pci_disable_ptm(struct pci_dev *dev);
+void pci_suspend_ptm(struct pci_dev *dev);
+void pci_resume_ptm(struct pci_dev *dev);
#else
+static inline void pci_ptm_init(struct pci_dev *dev) { }
static inline void pci_save_ptm_state(struct pci_dev *dev) { }
static inline void pci_restore_ptm_state(struct pci_dev *dev) { }
-static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline void pci_suspend_ptm(struct pci_dev *dev) { }
+static inline void pci_resume_ptm(struct pci_dev *dev) { }
#endif
unsigned long pci_cardbus_resource_alignment(struct resource *);
@@ -561,10 +565,14 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
+static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -575,12 +583,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
-#ifdef CONFIG_PCIE_PTM
-void pci_ptm_init(struct pci_dev *dev);
-#else
-static inline void pci_ptm_init(struct pci_dev *dev) { }
-#endif
-
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
@@ -774,4 +776,49 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
+/*
+ * Config Address for PCI Configuration Mechanism #1
+ *
+ * See PCI Local Bus Specification, Revision 3.0,
+ * Section 3.2.2.3.2, Figure 3-2, p. 50.
+ */
+
+#define PCI_CONF1_BUS_SHIFT 16 /* Bus number */
+#define PCI_CONF1_DEV_SHIFT 11 /* Device number */
+#define PCI_CONF1_FUNC_SHIFT 8 /* Function number */
+
+#define PCI_CONF1_BUS_MASK 0xff
+#define PCI_CONF1_DEV_MASK 0x1f
+#define PCI_CONF1_FUNC_MASK 0x7
+#define PCI_CONF1_REG_MASK 0xfc /* Limit aligned offset to a maximum of 256B */
+
+#define PCI_CONF1_ENABLE BIT(31)
+#define PCI_CONF1_BUS(x) (((x) & PCI_CONF1_BUS_MASK) << PCI_CONF1_BUS_SHIFT)
+#define PCI_CONF1_DEV(x) (((x) & PCI_CONF1_DEV_MASK) << PCI_CONF1_DEV_SHIFT)
+#define PCI_CONF1_FUNC(x) (((x) & PCI_CONF1_FUNC_MASK) << PCI_CONF1_FUNC_SHIFT)
+#define PCI_CONF1_REG(x) ((x) & PCI_CONF1_REG_MASK)
+
+#define PCI_CONF1_ADDRESS(bus, dev, func, reg) \
+ (PCI_CONF1_ENABLE | \
+ PCI_CONF1_BUS(bus) | \
+ PCI_CONF1_DEV(dev) | \
+ PCI_CONF1_FUNC(func) | \
+ PCI_CONF1_REG(reg))
+
+/*
+ * Extension of PCI Config Address for accessing extended PCIe registers
+ *
+ * No standardized specification, but used on many non-ECAM-compliant ARM SoCs
+ * and on AMD Barcelona and newer CPUs. Reserved bits [27:24] of the PCI Config
+ * Address carry the additional 4 high bits of the PCI Express register number.
+ */
+
+#define PCI_CONF1_EXT_REG_SHIFT 16
+#define PCI_CONF1_EXT_REG_MASK 0xf00
+#define PCI_CONF1_EXT_REG(x) (((x) & PCI_CONF1_EXT_REG_MASK) << PCI_CONF1_EXT_REG_SHIFT)
+
+#define PCI_CONF1_EXT_ADDRESS(bus, dev, func, reg) \
+ (PCI_CONF1_ADDRESS(bus, dev, func, reg) | \
+ PCI_CONF1_EXT_REG(reg))
+
#endif /* DRIVERS_PCI_H */
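
A hand-worked expansion of the new address macros:

	/*
	 * PCI_CONF1_ADDRESS(1, 2, 3, 0x44)
	 *   = BIT(31) | (1 << 16) | (2 << 11) | (3 << 8) | (0x44 & 0xfc)
	 *   = 0x80011344
	 *
	 * PCI_CONF1_EXT_ADDRESS(1, 2, 3, 0x144)
	 *   = PCI_CONF1_ADDRESS(1, 2, 3, 0x144) | ((0x144 & 0xf00) << 16)
	 *   = 0x80011344 | 0x01000000 = 0x81011344
	 *
	 * Only bits [7:2] of the register offset fit in the classic address;
	 * bits [11:8] are folded into the reserved bits [27:24].
	 */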
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a8aec190986c..53a1fa306e1e 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
@@ -350,29 +351,43 @@ static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
return 0;
}
+/*
+ * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
+ * register. Ports enter L1.2 when the most recent LTR value is greater
+ * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
+ * don't enter L1.2 too aggressively.
+ *
+ * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
+ */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
- u32 threshold_ns = threshold_us * 1000;
+ u64 threshold_ns = (u64) threshold_us * 1000;
- /* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
- if (threshold_ns < 32) {
- *scale = 0;
+ /*
+ * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
+ * value of 0x3ff.
+ */
+ if (threshold_ns <= 0x3ff * 1) {
+ *scale = 0; /* Value times 1ns */
*value = threshold_ns;
- } else if (threshold_ns < 1024) {
- *scale = 1;
- *value = threshold_ns >> 5;
- } else if (threshold_ns < 32768) {
- *scale = 2;
- *value = threshold_ns >> 10;
- } else if (threshold_ns < 1048576) {
- *scale = 3;
- *value = threshold_ns >> 15;
- } else if (threshold_ns < 33554432) {
- *scale = 4;
- *value = threshold_ns >> 20;
+ } else if (threshold_ns <= 0x3ff * 32) {
+ *scale = 1; /* Value times 32ns */
+ *value = roundup(threshold_ns, 32) / 32;
+ } else if (threshold_ns <= 0x3ff * 1024) {
+ *scale = 2; /* Value times 1024ns */
+ *value = roundup(threshold_ns, 1024) / 1024;
+ } else if (threshold_ns <= 0x3ff * 32768) {
+ *scale = 3; /* Value times 32768ns */
+ *value = roundup(threshold_ns, 32768) / 32768;
+ } else if (threshold_ns <= 0x3ff * 1048576) {
+ *scale = 4; /* Value times 1048576ns */
+ *value = roundup(threshold_ns, 1048576) / 1048576;
+ } else if (threshold_ns <= 0x3ff * (u64) 33554432) {
+ *scale = 5; /* Value times 33554432ns */
+ *value = roundup(threshold_ns, 33554432) / 33554432;
} else {
*scale = 5;
- *value = threshold_ns >> 25;
+ *value = 0x3ff; /* Max representable value */
}
}
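
A hand-worked pass through the new encoding:

	/*
	 * threshold_us = 55 -> threshold_ns = 55000.
	 * 0x3ff * 32 = 32736 < 55000 <= 0x3ff * 1024 = 1047552, so
	 * scale = 2 (1024 ns units) and
	 * value = roundup(55000, 1024) / 1024 = 54, i.e. 55296 ns --
	 * rounded up, so L1.2 entry is never more aggressive than requested.
	 */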
@@ -455,6 +470,31 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
pci_write_config_dword(pdev, pos, val);
}
+static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
+{
+ u16 l1ss = dev->l1ss;
+ u32 l1_2_enable;
+
+ /*
+ * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
+ * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
+ */
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /*
+ * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
+ * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+ * enable bits, even though they're all in PCI_L1SS_CTL1.
+ */
+ l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
+ if (l1_2_enable)
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
+ ctl1 | l1_2_enable);
+}
+
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -464,7 +504,6 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
- u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
@@ -513,39 +552,78 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
ctl2 == pctl2 && ctl2 == cctl2)
return;
- /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
- pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
- cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+ pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+ aspm_program_l1ss(parent, pctl1, ctl2);
+
+ cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+ cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+ aspm_program_l1ss(child, cctl1, ctl2);
+}
- if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- }
+static void aspm_l1ss_init(struct pcie_link_state *link)
+{
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
+ u32 parent_l1ss_cap, child_l1ss_cap;
+ u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
- /* Program T_POWER_ON times in both ports */
- pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
- pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+ if (!parent->l1ss || !child->l1ss)
+ return;
- /* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+ /* Setup L1 substate */
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
+ &parent_l1ss_cap);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
+ &child_l1ss_cap);
- /* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
-
- if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
- pl1_2_enables);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
- cl1_2_enables);
- }
+ if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ parent_l1ss_cap = 0;
+ if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ child_l1ss_cap = 0;
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!child->ltr_path)
+ child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+
+ if (parent_l1ss_cap)
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ &parent_l1ss_ctl1);
+ if (child_l1ss_cap)
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ &child_l1ss_ctl1);
+
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+
+ if (link->aspm_support & ASPM_STATE_L1SS)
+ aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
@@ -553,8 +631,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 parent_lnkcap, child_lnkcap;
u16 parent_lnkctl, child_lnkctl;
- u32 parent_l1ss_cap, child_l1ss_cap;
- u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
struct pci_bus *linkbus = parent->subordinate;
if (blacklist) {
@@ -609,52 +685,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
- /* Setup L1 substate */
- pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
- &parent_l1ss_cap);
- pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
- &child_l1ss_cap);
-
- if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
- parent_l1ss_cap = 0;
- if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
- child_l1ss_cap = 0;
-
- /*
- * If we don't have LTR for the entire path from the Root Complex
- * to this device, we can't use ASPM L1.2 because it relies on the
- * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
- */
- if (!child->ltr_path)
- child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
-
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
-
- if (parent_l1ss_cap)
- pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- &parent_l1ss_ctl1);
- if (child_l1ss_cap)
- pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
- &child_l1ss_ctl1);
-
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
-
- if (link->aspm_support & ASPM_STATE_L1SS)
- aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
+ aspm_l1ss_init(link);
/* Save default state */
link->aspm_default = link->aspm_enabled;
@@ -726,6 +757,43 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
+void pci_save_aspm_l1ss_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 l1ss = dev->l1ss;
+ u32 *cap;
+
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap, ctl1, ctl2;
+ u16 l1ss = dev->l1ss;
+
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = (u32 *)&save_state->cap.data[0];
+ ctl2 = *cap++;
+ ctl1 = *cap;
+ aspm_program_l1ss(dev, ctl1, ctl2);
+}
+
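
[Editor's note: saving CTL2 in data[0] and CTL1 in data[1] lets the restore path hand both dwords straight to aspm_program_l1ss(), which enforces the CTL2-before-CTL1, enables-last ordering. Both helpers assume a two-dword save buffer registered during capability setup, presumably along these lines — the registration call is an assumption, not shown in this hunk:]

	pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS, 2 * sizeof(u32));
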
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 3e9afee02e8d..f5ffea17c7f8 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -335,11 +335,16 @@ void pci_dpc_init(struct pci_dev *pdev)
return;
pdev->dpc_rp_extensions = true;
- pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
- pci_err(pdev, "RP PIO log size %u is invalid\n",
- pdev->dpc_rp_log_size);
- pdev->dpc_rp_log_size = 0;
+
+ /* Quirks may set dpc_rp_log_size if device or firmware is buggy */
+ if (!pdev->dpc_rp_log_size) {
+ pdev->dpc_rp_log_size =
+ (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+ if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
+ pdev->dpc_rp_log_size);
+ pdev->dpc_rp_log_size = 0;
+ }
}
}
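
[Editor's note: for reference, RP PIO Log Size occupies bits 11:8 of the DPC Capability register. A worked example of the decode guarded above, with a hypothetical capability value:]

	u16 cap = 0x0420;	/* hypothetical DPC Capability register */
	u8 size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;	/* = 4 */
	/* 4..9 is the valid range; a 0 here is the broken case that the
	   dpc_log_size() quirk further below in quirks.c papers over */
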
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 368a254e3124..b4e5f553467c 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -9,30 +9,38 @@
#include <linux/pci.h>
#include "../pci.h"
-static void pci_ptm_info(struct pci_dev *dev)
+/*
+ * If the next upstream device supports PTM, return it; otherwise return
+ * NULL. PTM Messages are local, so both link partners must support it.
+ */
+static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
{
- char clock_desc[8];
+ struct pci_dev *ups = pci_upstream_bridge(dev);
- switch (dev->ptm_granularity) {
- case 0:
- snprintf(clock_desc, sizeof(clock_desc), "unknown");
- break;
- case 255:
- snprintf(clock_desc, sizeof(clock_desc), ">254ns");
- break;
- default:
- snprintf(clock_desc, sizeof(clock_desc), "%uns",
- dev->ptm_granularity);
- break;
- }
- pci_info(dev, "PTM enabled%s, %s granularity\n",
- dev->ptm_root ? " (root)" : "", clock_desc);
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
+ * Switch Downstream Port, look up one more level.
+ */
+ if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
+ ups = pci_upstream_bridge(ups);
+
+ if (ups && ups->ptm_cap)
+ return ups;
+
+ return NULL;
}
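
[Editor's note: for the common topology Endpoint -> Switch Downstream Port -> Switch Upstream Port, the helper therefore hops two bridges to reach the PTM-capable partner. A usage sketch; the "endpoint" variable is hypothetical:]

	struct pci_dev *ups = pci_upstream_ptm(endpoint);

	if (ups)
		pci_info(endpoint, "next PTM-capable upstream device: %s\n",
			 pci_name(ups));
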
-void pci_disable_ptm(struct pci_dev *dev)
+/*
+ * Find the PTM Capability (if present) and extract the information we need
+ * to use it.
+ */
+void pci_ptm_init(struct pci_dev *dev)
{
- int ptm;
- u16 ctrl;
+ u16 ptm;
+ u32 cap;
+ struct pci_dev *ups;
if (!pci_is_pcie(dev))
return;
@@ -41,21 +49,47 @@ void pci_disable_ptm(struct pci_dev *dev)
if (!ptm)
return;
- pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl);
- ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
- pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl);
+ dev->ptm_cap = ptm;
+ pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
+
+ pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
+ dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
+
+ /*
+ * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
+ * furthest upstream Time Source as the PTM Root. For Endpoints,
+ * "the Effective Granularity is the maximum Local Clock Granularity
+ * reported by the PTM Root and all intervening PTM Time Sources."
+ */
+ ups = pci_upstream_ptm(dev);
+ if (ups) {
+ if (ups->ptm_granularity == 0)
+ dev->ptm_granularity = 0;
+ else if (ups->ptm_granularity > dev->ptm_granularity)
+ dev->ptm_granularity = ups->ptm_granularity;
+ } else if (cap & PCI_PTM_CAP_ROOT) {
+ dev->ptm_root = 1;
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+
+ /*
+ * Per sec 7.9.15.3, this should be the Local Clock
+ * Granularity of the associated Time Source. But it
+ * doesn't say how to find that Time Source.
+ */
+ dev->ptm_granularity = 0;
+ }
+
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
+ pci_enable_ptm(dev, NULL);
}
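
[Editor's note: the propagation above computes a running maximum of granularities down the tree. A worked example with hypothetical values:]

	/*
	 * PTM Root advertises 8ns, an intervening switch 16ns, and the
	 * endpoint's local clock 4ns.  Walking downstream:
	 *   switch:   max(8, 16) = 16ns
	 *   endpoint: max(16, 4) = 16ns  (the Effective Granularity)
	 * A 0 ("unknown") anywhere on the path forces 0 at the endpoint.
	 */
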
void pci_save_ptm_state(struct pci_dev *dev)
{
- int ptm;
+ u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
- u16 *cap;
+ u32 *cap;
- if (!pci_is_pcie(dev))
- return;
-
- ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
if (!ptm)
return;
@@ -63,146 +97,152 @@ void pci_save_ptm_state(struct pci_dev *dev)
if (!save_state)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
}
void pci_restore_ptm_state(struct pci_dev *dev)
{
+ u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
- int ptm;
- u16 *cap;
+ u32 *cap;
- if (!pci_is_pcie(dev))
+ if (!ptm)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
- ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!save_state || !ptm)
+ if (!save_state)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap);
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
}
-void pci_ptm_init(struct pci_dev *dev)
+/* Enable PTM in the Control register if possible */
+static int __pci_enable_ptm(struct pci_dev *dev)
{
- int pos;
- u32 cap, ctrl;
- u8 local_clock;
+ u16 ptm = dev->ptm_cap;
struct pci_dev *ups;
+ u32 ctrl;
- if (!pci_is_pcie(dev))
- return;
-
- /*
- * Enable PTM only on interior devices (root ports, switch ports,
- * etc.) on the assumption that it causes no link traffic until an
- * endpoint enables it.
- */
- if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
- return;
+ if (!ptm)
+ return -EINVAL;
/*
- * Switch Downstream Ports are not permitted to have a PTM
- * capability; their PTM behavior is controlled by the Upstream
- * Port (PCIe r5.0, sec 7.9.16).
+ * A device uses local PTM Messages to request time information
+ * from a PTM Root that's farther upstream. Every device along the
+ * path must support PTM and have it enabled so it can handle the
+ * messages. Therefore, if this device is not a PTM Root, the
+ * upstream link partner must have PTM enabled before we can enable
+ * PTM.
*/
- ups = pci_upstream_bridge(dev);
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
- ups && ups->ptm_enabled) {
- dev->ptm_granularity = ups->ptm_granularity;
- dev->ptm_enabled = 1;
- return;
+ if (!dev->ptm_root) {
+ ups = pci_upstream_ptm(dev);
+ if (!ups || !ups->ptm_enabled)
+ return -EINVAL;
}
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
- pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u16));
-
- pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
- local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
-
- /*
- * There's no point in enabling PTM unless it's enabled in the
- * upstream device or this device can be a PTM Root itself. Per
- * the spec recommendation (PCIe r3.1, sec 7.32.3), select the
- * furthest upstream Time Source as the PTM Root.
- */
- if (ups && ups->ptm_enabled) {
- ctrl = PCI_PTM_CTRL_ENABLE;
- if (ups->ptm_granularity == 0)
- dev->ptm_granularity = 0;
- else if (ups->ptm_granularity > local_clock)
- dev->ptm_granularity = ups->ptm_granularity;
- } else {
- if (cap & PCI_PTM_CAP_ROOT) {
- ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
- dev->ptm_root = 1;
- dev->ptm_granularity = local_clock;
- } else
- return;
- }
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
+ ctrl |= PCI_PTM_CTRL_ENABLE;
+ ctrl &= ~PCI_PTM_GRANULARITY_MASK;
ctrl |= dev->ptm_granularity << 8;
- pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
- dev->ptm_enabled = 1;
+ if (dev->ptm_root)
+ ctrl |= PCI_PTM_CTRL_ROOT;
- pci_ptm_info(dev);
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
+ return 0;
}
+/**
+ * pci_enable_ptm() - Enable Precision Time Measurement
+ * @dev: PCI device
+ * @granularity: pointer to return granularity
+ *
+ * Enable Precision Time Measurement for @dev. If successful and
+ * @granularity is non-NULL, return the Effective Granularity.
+ *
+ * Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
+ * is not a PTM Root and lacks an upstream path of PTM-enabled devices.
+ */
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
- int pos;
- u32 cap, ctrl;
- struct pci_dev *ups;
-
- if (!pci_is_pcie(dev))
- return -EINVAL;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return -EINVAL;
-
- pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
- if (!(cap & PCI_PTM_CAP_REQ))
- return -EINVAL;
-
- /*
- * For a PCIe Endpoint, PTM is only useful if the endpoint can
- * issue PTM requests to upstream devices that have PTM enabled.
- *
- * For Root Complex Integrated Endpoints, there is no upstream
- * device, so there must be some implementation-specific way to
- * associate the endpoint with a time source.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) {
- ups = pci_upstream_bridge(dev);
- if (!ups || !ups->ptm_enabled)
- return -EINVAL;
+ int rc;
+ char clock_desc[8];
- dev->ptm_granularity = ups->ptm_granularity;
- } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
- dev->ptm_granularity = 0;
- } else
- return -EINVAL;
+ rc = __pci_enable_ptm(dev);
+ if (rc)
+ return rc;
- ctrl = PCI_PTM_CTRL_ENABLE;
- ctrl |= dev->ptm_granularity << 8;
- pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
dev->ptm_enabled = 1;
- pci_ptm_info(dev);
-
if (granularity)
*granularity = dev->ptm_granularity;
+
+ switch (dev->ptm_granularity) {
+ case 0:
+ snprintf(clock_desc, sizeof(clock_desc), "unknown");
+ break;
+ case 255:
+ snprintf(clock_desc, sizeof(clock_desc), ">254ns");
+ break;
+ default:
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
+ dev->ptm_granularity);
+ break;
+ }
+ pci_info(dev, "PTM enabled%s, %s granularity\n",
+ dev->ptm_root ? " (root)" : "", clock_desc);
+
return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
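
[Editor's note: a hypothetical caller, e.g. a NIC driver that wants hardware cross-timestamping; pass NULL if the Effective Granularity is not needed. setup_hw_cross_timestamp() is an invented placeholder for driver-specific code:]

	u8 granularity;

	if (!pci_enable_ptm(pdev, &granularity) && granularity)
		/* granularity is in ns; 0 would mean "unknown" */
		setup_hw_cross_timestamp(pdev, granularity);
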
+static void __pci_disable_ptm(struct pci_dev *dev)
+{
+ u16 ptm = dev->ptm_cap;
+ u32 ctrl;
+
+ if (!ptm)
+ return;
+
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
+ ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
+}
+
+/**
+ * pci_disable_ptm() - Disable Precision Time Measurement
+ * @dev: PCI device
+ *
+ * Disable Precision Time Measurement for @dev.
+ */
+void pci_disable_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled) {
+ __pci_disable_ptm(dev);
+ dev->ptm_enabled = 0;
+ }
+}
+EXPORT_SYMBOL(pci_disable_ptm);
+
+/*
+ * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
+ * resume if necessary.
+ */
+void pci_suspend_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled)
+ __pci_disable_ptm(dev);
+}
+
+/* If PTM was enabled before suspend, re-enable it when resuming */
+void pci_resume_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled)
+ __pci_enable_ptm(dev);
+}
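
[Editor's note: the asymmetry with pci_disable_ptm() is deliberate: leaving dev->ptm_enabled set is what distinguishes "quiesced for suspend" from "disabled by the driver". An illustration of the intended pairing; the actual call sites are outside this patch:]

	pci_suspend_ptm(pdev);	/* clears only the hardware enable bit */
	/* ... dev->ptm_enabled stays set across the suspend ... */
	pci_resume_ptm(pdev);	/* re-enables because ptm_enabled is set */
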
+
bool pcie_ptm_enabled(struct pci_dev *dev)
{
if (!dev)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c5286b027f00..b66fa42c4b1f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1297,7 +1297,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
- unsigned int cmax;
+ unsigned int cmax, buses;
/*
* Bus already configured by firmware, process it in the
@@ -1322,7 +1322,8 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
child->bridge_ctl = bctl;
}
- cmax = pci_scan_child_bus(child);
+ buses = subordinate - secondary;
+ cmax = pci_scan_child_bus_extend(child, buses);
if (cmax > subordinate)
pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
subordinate, cmax);
@@ -2920,8 +2921,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* hotplug bridges too much during the second scan below.
*/
used_buses++;
- if (cmax - max > 1)
- used_buses += cmax - max - 1;
+ if (max - cmax > 1)
+ used_buses += max - cmax - 1;
}
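
[Editor's note: the sign flip matters because both variables are unsigned: cmax is saved before the bridge is scanned and max is the result afterwards, so max >= cmax and the old cmax - max underflowed. A worked example with hypothetical bus numbers:]

	/*
	 * Before scanning a bridge, cmax = max = 0x20.  The scan claims
	 * buses 0x21..0x24 and returns max = 0x24:
	 *   max - cmax = 4 buses consumed
	 *   used_buses++              counts one of them
	 *   used_buses += 4 - 1 = 3   counts the rest
	 */
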
/* Scan bridges that need to be reconfigured */
@@ -2929,7 +2930,6 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
unsigned int buses = 0;
if (!hotplug_bridges && normal_bridges == 1) {
-
/*
* There is only one bridge on the bus (upstream
* port) so it gets all available buses which it
@@ -2938,7 +2938,6 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
*/
buses = available_buses;
} else if (dev->is_hotplug_bridge) {
-
/*
* Distribute the extra buses between hotplug
* bridges if any.
@@ -2957,7 +2956,7 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
/*
* Make sure a hotplug bridge has at least the minimum requested
* number of buses but allow it to grow up to the maximum available
- * bus number of there is room.
+ * bus number if there is room.
*/
if (bus->self && bus->self->is_hotplug_bridge) {
used_buses = max_t(unsigned int, available_buses,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4944798e75b5..285acc4aaccc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5956,3 +5956,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b1, aspm_l1_acceptable_latency
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c0, aspm_l1_acceptable_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency);
#endif
+
+#ifdef CONFIG_PCIE_DPC
+/*
+ * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC
+ * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports.
+ */
+static void dpc_log_size(struct pci_dev *dev)
+{
+ u16 dpc, val;
+
+ dpc = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
+ if (!dpc)
+ return;
+
+ pci_read_config_word(dev, dpc + PCI_EXP_DPC_CAP, &val);
+ if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
+ return;
+
+ if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
+ pci_info(dev, "Overriding RP PIO Log Size to 4\n");
+ dev->dpc_rp_log_size = 4;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a29, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+#endif
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8cb68e6f6ef9..dc6a30ee6edf 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1745,119 +1745,6 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
}
#endif
-/*
- * First try will not touch PCI bridge res.
- * Second and later try will clear small leaf bridge res.
- * Will stop till to the max depth if can not find good one.
- */
-void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
-{
- LIST_HEAD(realloc_head);
- /* List of resources that want additional resources */
- struct list_head *add_list = NULL;
- int tried_times = 0;
- enum release_type rel_type = leaf_only;
- LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
- int pci_try_num = 1;
- enum enable_type enable_local;
-
- /* Don't realloc if asked to do so */
- enable_local = pci_realloc_detect(bus, pci_realloc_enable);
- if (pci_realloc_enabled(enable_local)) {
- int max_depth = pci_bus_get_depth(bus);
-
- pci_try_num = max_depth + 1;
- dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
- }
-
-again:
- /*
- * Last try will use add_list, otherwise will try good to have as must
- * have, so can realloc parent bridge resource
- */
- if (tried_times + 1 == pci_try_num)
- add_list = &realloc_head;
- /*
- * Depth first, calculate sizes and alignments of all subordinate buses.
- */
- __pci_bus_size_bridges(bus, add_list);
-
- /* Depth last, allocate resources and update the hardware. */
- __pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
- tried_times++;
-
- /* Any device complain? */
- if (list_empty(&fail_head))
- goto dump;
-
- if (tried_times >= pci_try_num) {
- if (enable_local == undefined)
- dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (enable_local == auto_enabled)
- dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
-
- free_list(&fail_head);
- goto dump;
- }
-
- dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
- tried_times + 1);
-
- /* Third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
-
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
-
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
-
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
-
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
- }
- free_list(&fail_head);
-
- goto again;
-
-dump:
- /* Dump the resource on buses */
- pci_bus_dump_resources(bus);
-}
-
-void __init pci_assign_unassigned_resources(void)
-{
- struct pci_bus *root_bus;
-
- list_for_each_entry(root_bus, &pci_root_buses, node) {
- pci_assign_unassigned_root_bus_resources(root_bus);
-
- /* Make sure the root bridge has a companion ACPI device */
- if (ACPI_HANDLE(root_bus->bridge))
- acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
- }
-}
-
static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
struct list_head *add_list,
resource_size_t new_size)
@@ -1881,7 +1768,10 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
}
res->end = res->start + new_size - 1;
- remove_from_list(add_list, res);
+
+ /* If the resource is part of the add_list, remove it now */
+ if (add_list)
+ remove_from_list(add_list, res);
}
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
@@ -2029,13 +1919,15 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
- struct list_head *add_list)
+ struct list_head *add_list)
{
struct resource available_io, available_mmio, available_mmio_pref;
if (!bridge->is_hotplug_bridge)
return;
+ pci_dbg(bridge, "distributing available resources\n");
+
/* Take the initial extra resources from the hotplug port */
available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
@@ -2047,6 +1939,174 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
available_mmio_pref);
}
+static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
+{
+ const struct resource *r;
+
+ /*
+ * Check the child device's resources; if they are not yet
+ * assigned, it means we (not the boot firmware) are configuring
+ * them, so we should be able to extend the upstream bridge's
+ * (the hotplug downstream PCIe port's) resources the same way
+ * we do in the normal hotplug case.
+ */
+ r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+ r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
+ if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+ r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+
+ return true;
+}
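
[Editor's note: all three checks rely on IORESOURCE_STARTALIGN, which marks a resource whose start field still holds an alignment rather than an assigned bus address, i.e. a window the kernel sized itself. A sketch of such a not-yet-assigned window, with hypothetical values:]

	struct resource win = {
		.start = 0x100000,	/* alignment, not an address yet */
		.flags = IORESOURCE_MEM | IORESOURCE_STARTALIGN,
	};

[A firmware-assigned window carries a real address and no IORESOURCE_STARTALIGN, so the helper returns false for it.]
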
+
+static void pci_root_bus_distribute_available_resources(struct pci_bus *bus,
+ struct list_head *add_list)
+{
+ struct pci_dev *dev, *bridge = bus->self;
+
+ for_each_pci_bridge(dev, bus) {
+ struct pci_bus *b;
+
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ /*
+ * Need to check "bridge" here too because it is NULL in the
+ * case of the root bus.
+ */
+ if (bridge && pci_bridge_resources_not_assigned(dev)) {
+ pci_bridge_distribute_available_resources(bridge, add_list);
+ /*
+ * There is only a PCIe upstream port on the bus, so we
+ * don't need to go further.
+ */
+ return;
+ }
+
+ pci_root_bus_distribute_available_resources(b, add_list);
+ }
+}
+
+/*
+ * First try will not touch PCI bridge res.
+ * Second and later try will clear small leaf bridge res.
+ * Will stop till to the max depth if can not find good one.
+ */
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
+{
+ LIST_HEAD(realloc_head);
+ /* List of resources that want additional resources */
+ struct list_head *add_list = NULL;
+ int tried_times = 0;
+ enum release_type rel_type = leaf_only;
+ LIST_HEAD(fail_head);
+ struct pci_dev_resource *fail_res;
+ int pci_try_num = 1;
+ enum enable_type enable_local;
+
+ /* Don't realloc if asked to do so */
+ enable_local = pci_realloc_detect(bus, pci_realloc_enable);
+ if (pci_realloc_enabled(enable_local)) {
+ int max_depth = pci_bus_get_depth(bus);
+
+ pci_try_num = max_depth + 1;
+ dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
+ }
+
+again:
+ /*
+ * Last try will use add_list, otherwise will try good to have as must
+ * have, so can realloc parent bridge resource
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+ /*
+ * Depth first, calculate sizes and alignments of all subordinate buses.
+ */
+ __pci_bus_size_bridges(bus, add_list);
+
+ pci_root_bus_distribute_available_resources(bus, add_list);
+
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (add_list)
+ BUG_ON(!list_empty(add_list));
+ tried_times++;
+
+ /* Any device complain? */
+ if (list_empty(&fail_head))
+ goto dump;
+
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined)
+ dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ else if (enable_local == auto_enabled)
+ dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+
+ free_list(&fail_head);
+ goto dump;
+ }
+
+ dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
+ tried_times + 1);
+
+ /* Third times and later will not check if it is leaf */
+ if ((tried_times + 1) > 2)
+ rel_type = whole_subtree;
+
+ /*
+ * Try to release leaf bridge's resources that doesn't fit resource of
+ * child device under that bridge.
+ */
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & PCI_RES_TYPE_MASK,
+ rel_type);
+
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
+ int idx;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
+ }
+ free_list(&fail_head);
+
+ goto again;
+
+dump:
+ /* Dump the resource on buses */
+ pci_bus_dump_resources(bus);
+}
+
+void __init pci_assign_unassigned_resources(void)
+{
+ struct pci_bus *root_bus;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node) {
+ pci_assign_unassigned_root_bus_resources(root_bus);
+
+ /* Make sure the root bridge has a companion ACPI device */
+ if (ACPI_HANDLE(root_bus->bridge))
+ acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
+ }
+}
+
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 439ac5f5907a..b492e67c3d87 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -214,6 +214,17 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
root = pci_find_parent_resource(dev, res);
if (!root) {
+ /*
+ * If dev is behind a bridge, accesses will only reach it
+ * if res is inside the relevant bridge window.
+ */
+ if (pci_upstream_bridge(dev))
+ return -ENXIO;
+
+ /*
+ * On the root bus, assume the host bridge will forward
+ * everything.
+ */
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 689271c4245c..7378e2f3e525 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -521,24 +521,14 @@ static int pcifront_rescan_root(struct pcifront_device *pdev,
int err;
struct pci_bus *b;
-#ifndef CONFIG_PCI_DOMAINS
- if (domain != 0) {
- dev_err(&pdev->xdev->dev,
- "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
- dev_err(&pdev->xdev->dev,
- "Please compile with CONFIG_PCI_DOMAINS\n");
- return -EINVAL;
- }
-#endif
-
- dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
- domain, bus);
-
b = pci_find_bus(domain, bus);
if (!b)
/* If the bus is unknown, create it. */
return pcifront_scan_root(pdev, domain, bus);
+ dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
err = pcifront_scan_bus(pdev, domain, bus, b);
/* Claim resources before going "live" with our devices */
@@ -819,76 +809,73 @@ out:
return err;
}
-static int pcifront_try_connect(struct pcifront_device *pdev)
+static void pcifront_connect(struct pcifront_device *pdev)
{
- int err = -EFAULT;
+ int err;
int i, num_roots, len;
char str[64];
unsigned int domain, bus;
-
- /* Only connect once */
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateInitialised)
- goto out;
-
- err = pcifront_connect_and_init_dma(pdev);
- if (err && err != -EEXIST) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error setting up PCI Frontend");
- goto out;
- }
-
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
"root_num", "%d", &num_roots);
if (err == -ENOENT) {
xenbus_dev_error(pdev->xdev, err,
"No PCI Roots found, trying 0000:00");
- err = pcifront_scan_root(pdev, 0, 0);
+ err = pcifront_rescan_root(pdev, 0, 0);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
"Error scanning PCI root 0000:00");
- goto out;
+ return;
}
num_roots = 0;
} else if (err != 1) {
- if (err == 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
+ xenbus_dev_fatal(pdev->xdev, err >= 0 ? -EINVAL : err,
"Error reading number of PCI roots");
- goto out;
+ return;
}
for (i = 0; i < num_roots; i++) {
len = snprintf(str, sizeof(str), "root-%d", i);
- if (unlikely(len >= (sizeof(str) - 1))) {
- err = -ENOMEM;
- goto out;
- }
+ if (unlikely(len >= (sizeof(str) - 1)))
+ return;
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
"%x:%x", &domain, &bus);
if (err != 2) {
- if (err >= 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
+ xenbus_dev_fatal(pdev->xdev, err >= 0 ? -EINVAL : err,
"Error reading PCI root %d", i);
- goto out;
+ return;
}
- err = pcifront_scan_root(pdev, domain, bus);
+ err = pcifront_rescan_root(pdev, domain, bus);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
"Error scanning PCI root %04x:%02x",
domain, bus);
- goto out;
+ return;
}
}
- err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+ xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+}
-out:
- return err;
+static void pcifront_try_connect(struct pcifront_device *pdev)
+{
+ int err;
+
+ /* Only connect once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitialised)
+ return;
+
+ err = pcifront_connect_and_init_dma(pdev);
+ if (err && err != -EEXIST) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error setting up PCI Frontend");
+ return;
+ }
+
+ pcifront_connect(pdev);
}
static int pcifront_try_disconnect(struct pcifront_device *pdev)
@@ -914,80 +901,37 @@ out:
return err;
}
-static int pcifront_attach_devices(struct pcifront_device *pdev)
+static void pcifront_attach_devices(struct pcifront_device *pdev)
{
- int err = -EFAULT;
- int i, num_roots, len;
- unsigned int domain, bus;
- char str[64];
-
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ if (xenbus_read_driver_state(pdev->xdev->nodename) ==
XenbusStateReconfiguring)
- goto out;
-
- err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
- "root_num", "%d", &num_roots);
- if (err == -ENOENT) {
- xenbus_dev_error(pdev->xdev, err,
- "No PCI Roots found, trying 0000:00");
- err = pcifront_rescan_root(pdev, 0, 0);
- if (err) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error scanning PCI root 0000:00");
- goto out;
- }
- num_roots = 0;
- } else if (err != 1) {
- if (err == 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
- "Error reading number of PCI roots");
- goto out;
- }
-
- for (i = 0; i < num_roots; i++) {
- len = snprintf(str, sizeof(str), "root-%d", i);
- if (unlikely(len >= (sizeof(str) - 1))) {
- err = -ENOMEM;
- goto out;
- }
-
- err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
- "%x:%x", &domain, &bus);
- if (err != 2) {
- if (err >= 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
- "Error reading PCI root %d", i);
- goto out;
- }
-
- err = pcifront_rescan_root(pdev, domain, bus);
- if (err) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error scanning PCI root %04x:%02x",
- domain, bus);
- goto out;
- }
- }
-
- xenbus_switch_state(pdev->xdev, XenbusStateConnected);
-
-out:
- return err;
+ pcifront_connect(pdev);
}
static int pcifront_detach_devices(struct pcifront_device *pdev)
{
int err = 0;
int i, num_devs;
+ enum xenbus_state state;
unsigned int domain, bus, slot, func;
struct pci_dev *pci_dev;
char str[64];
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateConnected)
+ state = xenbus_read_driver_state(pdev->xdev->nodename);
+ if (state == XenbusStateInitialised) {
+ dev_dbg(&pdev->xdev->dev, "Handle skipped connect.\n");
+ /* We missed Connected and need to initialize. */
+ err = pcifront_connect_and_init_dma(pdev);
+ if (err && err != -EEXIST) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error setting up PCI Frontend");
+ goto out;
+ }
+
+ goto out_switch_state;
+ } else if (state != XenbusStateConnected) {
goto out;
+ }
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
&num_devs);
@@ -1048,6 +992,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
domain, bus, slot, func);
}
+ out_switch_state:
err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);
out:
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 6ce05ef4844d..00e3a637f7b6 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -44,7 +44,9 @@
* This allows us to perform the check, i.e, perfmon_capable(),
* in the context of the event owner, once, during the event_init().
*/
-#define SPE_PMU_HW_FLAGS_CX BIT(0)
+#define SPE_PMU_HW_FLAGS_CX 0x00001
+
+static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);
static void set_spe_event_has_cx(struct perf_event *event)
{
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index ad7d2edfc414..c93286483b42 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -59,7 +59,7 @@ struct imx8_pcie_phy {
bool clkreq_unused;
};
-static int imx8_pcie_phy_init(struct phy *phy)
+static int imx8_pcie_phy_power_on(struct phy *phy)
{
int ret;
u32 val, pad_mode;
@@ -137,14 +137,14 @@ static int imx8_pcie_phy_init(struct phy *phy)
return ret;
}
-static int imx8_pcie_phy_power_on(struct phy *phy)
+static int imx8_pcie_phy_init(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
return clk_prepare_enable(imx8_phy->clk);
}
-static int imx8_pcie_phy_power_off(struct phy *phy)
+static int imx8_pcie_phy_exit(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
@@ -155,8 +155,8 @@ static int imx8_pcie_phy_power_off(struct phy *phy)
static const struct phy_ops imx8_pcie_phy_ops = {
.init = imx8_pcie_phy_init,
+ .exit = imx8_pcie_phy_exit,
.power_on = imx8_pcie_phy_power_on,
- .power_off = imx8_pcie_phy_power_off,
.owner = THIS_MODULE,
};
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index d768dcf75cf1..f71fefff400f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -135,6 +135,20 @@ config PINCTRL_BM1880
help
Pinctrl driver for Bitmain BM1880 SoC.
+config PINCTRL_CY8C95X0
+ tristate "Cypress CY8C95X0 I2C pinctrl and GPIO driver"
+ depends on I2C
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select REGMAP_I2C
+ help
+ Support for the 20/40/60-pin Cypress CY8C95x0 pinctrl/GPIO I2C expander.
+ This driver can also be built as a module. If so, the module will be
+ called pinctrl-cy8c95x0.
+
config PINCTRL_DA850_PUPD
tristate "TI DA850/OMAP-L138/AM18XX pull-up and pull-down groups"
depends on OF && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
@@ -324,6 +338,11 @@ config PINCTRL_OCELOT
select GENERIC_PINMUX_FUNCTIONS
select OF_GPIO
select REGMAP_MMIO
+ help
+ Support for the internal GPIO interfaces on Microsemi Ocelot and
+ Jaguar2 SoCs.
+
+ If compiled as a module, the module name will be pinctrl-ocelot.
config PINCTRL_OXNAS
bool
@@ -415,23 +434,6 @@ config PINCTRL_ST
select PINCONF
select GPIOLIB_IRQCHIP
-config PINCTRL_STARFIVE
- tristate "Pinctrl and GPIO driver for the StarFive JH7100 SoC"
- depends on SOC_STARFIVE || COMPILE_TEST
- depends on OF
- default SOC_STARFIVE
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GENERIC_PINCONF
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select OF_GPIO
- help
- Say yes here to support pin control on the StarFive JH7100 SoC.
- This also provides an interface to the GPIO pins not used by other
- peripherals supporting inputs, outputs, configuring pull-up/pull-down
- and interrupts on input changes.
-
config PINCTRL_STMFX
tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
depends on I2C
@@ -529,6 +531,7 @@ source "drivers/pinctrl/renesas/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sprd/Kconfig"
+source "drivers/pinctrl/starfive/Kconfig"
source "drivers/pinctrl/stm32/Kconfig"
source "drivers/pinctrl/sunplus/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index e76f5cdc64b0..89bfa01b5231 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
+obj-$(CONFIG_PINCTRL_CY8C95X0) += pinctrl-cy8c95x0.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
@@ -43,7 +44,6 @@ obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
-obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
@@ -70,6 +70,7 @@ obj-$(CONFIG_PINCTRL_RENESAS) += renesas/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
+obj-$(CONFIG_SOC_STARFIVE) += starfive/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
obj-y += sunplus/
obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 83d47ff1cea8..a30912a92f05 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -92,19 +92,10 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
- int ret;
-
pr_debug("Disabling signal %s for %s\n", expr->signal,
expr->function);
- ret = aspeed_sig_expr_eval(ctx, expr, true);
- if (ret < 0)
- return ret;
-
- if (ret)
- return aspeed_sig_expr_set(ctx, expr, false);
-
- return 0;
+ return aspeed_sig_expr_set(ctx, expr, false);
}
/**
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6318.c b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
index 9311220fb6cb..64073546310e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6318.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
@@ -27,12 +27,6 @@
#define BCM6318_PAD_REG 0x54
#define BCM6328_PAD_MASK GENMASK(3, 0)
-struct bcm6318_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6318_function {
const char *name;
const char * const *groups;
@@ -146,64 +140,57 @@ static unsigned gpio47_pins[] = { 47 };
static unsigned gpio48_pins[] = { 48 };
static unsigned gpio49_pins[] = { 49 };
-#define BCM6318_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6318_pingroup bcm6318_groups[] = {
- BCM6318_GROUP(gpio0),
- BCM6318_GROUP(gpio1),
- BCM6318_GROUP(gpio2),
- BCM6318_GROUP(gpio3),
- BCM6318_GROUP(gpio4),
- BCM6318_GROUP(gpio5),
- BCM6318_GROUP(gpio6),
- BCM6318_GROUP(gpio7),
- BCM6318_GROUP(gpio8),
- BCM6318_GROUP(gpio9),
- BCM6318_GROUP(gpio10),
- BCM6318_GROUP(gpio11),
- BCM6318_GROUP(gpio12),
- BCM6318_GROUP(gpio13),
- BCM6318_GROUP(gpio14),
- BCM6318_GROUP(gpio15),
- BCM6318_GROUP(gpio16),
- BCM6318_GROUP(gpio17),
- BCM6318_GROUP(gpio18),
- BCM6318_GROUP(gpio19),
- BCM6318_GROUP(gpio20),
- BCM6318_GROUP(gpio21),
- BCM6318_GROUP(gpio22),
- BCM6318_GROUP(gpio23),
- BCM6318_GROUP(gpio24),
- BCM6318_GROUP(gpio25),
- BCM6318_GROUP(gpio26),
- BCM6318_GROUP(gpio27),
- BCM6318_GROUP(gpio28),
- BCM6318_GROUP(gpio29),
- BCM6318_GROUP(gpio30),
- BCM6318_GROUP(gpio31),
- BCM6318_GROUP(gpio32),
- BCM6318_GROUP(gpio33),
- BCM6318_GROUP(gpio34),
- BCM6318_GROUP(gpio35),
- BCM6318_GROUP(gpio36),
- BCM6318_GROUP(gpio37),
- BCM6318_GROUP(gpio38),
- BCM6318_GROUP(gpio39),
- BCM6318_GROUP(gpio40),
- BCM6318_GROUP(gpio41),
- BCM6318_GROUP(gpio42),
- BCM6318_GROUP(gpio43),
- BCM6318_GROUP(gpio44),
- BCM6318_GROUP(gpio45),
- BCM6318_GROUP(gpio46),
- BCM6318_GROUP(gpio47),
- BCM6318_GROUP(gpio48),
- BCM6318_GROUP(gpio49),
+static struct pingroup bcm6318_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(gpio32),
+ BCM_PIN_GROUP(gpio33),
+ BCM_PIN_GROUP(gpio34),
+ BCM_PIN_GROUP(gpio35),
+ BCM_PIN_GROUP(gpio36),
+ BCM_PIN_GROUP(gpio37),
+ BCM_PIN_GROUP(gpio38),
+ BCM_PIN_GROUP(gpio39),
+ BCM_PIN_GROUP(gpio40),
+ BCM_PIN_GROUP(gpio41),
+ BCM_PIN_GROUP(gpio42),
+ BCM_PIN_GROUP(gpio43),
+ BCM_PIN_GROUP(gpio44),
+ BCM_PIN_GROUP(gpio45),
+ BCM_PIN_GROUP(gpio46),
+ BCM_PIN_GROUP(gpio47),
+ BCM_PIN_GROUP(gpio48),
+ BCM_PIN_GROUP(gpio49),
};
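
[Editor's note: BCM_PIN_GROUP() is not defined in this hunk; given the generic struct pingroup it populates, it is presumably a thin wrapper over the PINCTRL_PINGROUP() initializer from <linux/pinctrl/pinctrl.h>, along these lines — a sketch of the assumed header, not the verbatim patch:]

#define BCM_PIN_GROUP(n)	\
	PINCTRL_PINGROUP(#n, n##_pins, ARRAY_SIZE(n##_pins))
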
/* GPIO_MODE */
@@ -368,10 +355,10 @@ static const char *bcm6318_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6318_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6318_groups[group].pins;
- *num_pins = bcm6318_groups[group].num_pins;
+ *npins = bcm6318_groups[group].npins;
return 0;
}
@@ -424,7 +411,7 @@ static int bcm6318_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm6318_pingroup *pg = &bcm6318_groups[group];
+ const struct pingroup *pg = &bcm6318_groups[group];
const struct bcm6318_function *f = &bcm6318_funcs[selector];
bcm6318_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63268.c b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
index 1c1060a39597..80c2fc55ffa2 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm63268.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
@@ -40,12 +40,6 @@ enum bcm63268_pinctrl_reg {
BCM63268_BASEMODE,
};
-struct bcm63268_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm63268_function {
const char *name;
const char * const *groups;
@@ -185,74 +179,67 @@ static unsigned vdsl_phy1_grp_pins[] = { 12, 13 };
static unsigned vdsl_phy2_grp_pins[] = { 24, 25 };
static unsigned vdsl_phy3_grp_pins[] = { 26, 27 };
-#define BCM63268_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm63268_pingroup bcm63268_groups[] = {
- BCM63268_GROUP(gpio0),
- BCM63268_GROUP(gpio1),
- BCM63268_GROUP(gpio2),
- BCM63268_GROUP(gpio3),
- BCM63268_GROUP(gpio4),
- BCM63268_GROUP(gpio5),
- BCM63268_GROUP(gpio6),
- BCM63268_GROUP(gpio7),
- BCM63268_GROUP(gpio8),
- BCM63268_GROUP(gpio9),
- BCM63268_GROUP(gpio10),
- BCM63268_GROUP(gpio11),
- BCM63268_GROUP(gpio12),
- BCM63268_GROUP(gpio13),
- BCM63268_GROUP(gpio14),
- BCM63268_GROUP(gpio15),
- BCM63268_GROUP(gpio16),
- BCM63268_GROUP(gpio17),
- BCM63268_GROUP(gpio18),
- BCM63268_GROUP(gpio19),
- BCM63268_GROUP(gpio20),
- BCM63268_GROUP(gpio21),
- BCM63268_GROUP(gpio22),
- BCM63268_GROUP(gpio23),
- BCM63268_GROUP(gpio24),
- BCM63268_GROUP(gpio25),
- BCM63268_GROUP(gpio26),
- BCM63268_GROUP(gpio27),
- BCM63268_GROUP(gpio28),
- BCM63268_GROUP(gpio29),
- BCM63268_GROUP(gpio30),
- BCM63268_GROUP(gpio31),
- BCM63268_GROUP(gpio32),
- BCM63268_GROUP(gpio33),
- BCM63268_GROUP(gpio34),
- BCM63268_GROUP(gpio35),
- BCM63268_GROUP(gpio36),
- BCM63268_GROUP(gpio37),
- BCM63268_GROUP(gpio38),
- BCM63268_GROUP(gpio39),
- BCM63268_GROUP(gpio40),
- BCM63268_GROUP(gpio41),
- BCM63268_GROUP(gpio42),
- BCM63268_GROUP(gpio43),
- BCM63268_GROUP(gpio44),
- BCM63268_GROUP(gpio45),
- BCM63268_GROUP(gpio46),
- BCM63268_GROUP(gpio47),
- BCM63268_GROUP(gpio48),
- BCM63268_GROUP(gpio49),
- BCM63268_GROUP(gpio50),
- BCM63268_GROUP(gpio51),
+static struct pingroup bcm63268_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(gpio32),
+ BCM_PIN_GROUP(gpio33),
+ BCM_PIN_GROUP(gpio34),
+ BCM_PIN_GROUP(gpio35),
+ BCM_PIN_GROUP(gpio36),
+ BCM_PIN_GROUP(gpio37),
+ BCM_PIN_GROUP(gpio38),
+ BCM_PIN_GROUP(gpio39),
+ BCM_PIN_GROUP(gpio40),
+ BCM_PIN_GROUP(gpio41),
+ BCM_PIN_GROUP(gpio42),
+ BCM_PIN_GROUP(gpio43),
+ BCM_PIN_GROUP(gpio44),
+ BCM_PIN_GROUP(gpio45),
+ BCM_PIN_GROUP(gpio46),
+ BCM_PIN_GROUP(gpio47),
+ BCM_PIN_GROUP(gpio48),
+ BCM_PIN_GROUP(gpio49),
+ BCM_PIN_GROUP(gpio50),
+ BCM_PIN_GROUP(gpio51),
/* multi pin groups */
- BCM63268_GROUP(nand_grp),
- BCM63268_GROUP(dectpd_grp),
- BCM63268_GROUP(vdsl_phy0_grp),
- BCM63268_GROUP(vdsl_phy1_grp),
- BCM63268_GROUP(vdsl_phy2_grp),
- BCM63268_GROUP(vdsl_phy3_grp),
+ BCM_PIN_GROUP(nand_grp),
+ BCM_PIN_GROUP(dectpd_grp),
+ BCM_PIN_GROUP(vdsl_phy0_grp),
+ BCM_PIN_GROUP(vdsl_phy1_grp),
+ BCM_PIN_GROUP(vdsl_phy2_grp),
+ BCM_PIN_GROUP(vdsl_phy3_grp),
};
static const char * const led_groups[] = {
@@ -487,10 +474,10 @@ static const char *bcm63268_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm63268_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group,
const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm63268_groups[group].pins;
- *num_pins = bcm63268_groups[group].num_pins;
+ *npins = bcm63268_groups[group].npins;
return 0;
}
@@ -545,13 +532,13 @@ static int bcm63268_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm63268_pingroup *pg = &bcm63268_groups[group];
+ const struct pingroup *pg = &bcm63268_groups[group];
const struct bcm63268_function *f = &bcm63268_funcs[selector];
unsigned i;
unsigned int reg;
unsigned int val, mask;
- for (i = 0; i < pg->num_pins; i++)
+ for (i = 0; i < pg->npins; i++)
bcm63268_set_gpio(pc, pg->pins[i]);
switch (f->reg) {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6328.c b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
index ffa8864abab6..1e8cc2c80c81 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6328.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
@@ -26,12 +26,6 @@
#define BCM6328_MUX_OTHER_REG 0x24
#define BCM6328_MUX_MASK GENMASK(1, 0)
-struct bcm6328_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6328_function {
const char *name;
const char * const *groups;
@@ -125,49 +119,42 @@ static unsigned gpio31_pins[] = { 31 };
static unsigned hsspi_cs1_pins[] = { 36 };
static unsigned usb_port1_pins[] = { 38 };
-#define BCM6328_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6328_pingroup bcm6328_groups[] = {
- BCM6328_GROUP(gpio0),
- BCM6328_GROUP(gpio1),
- BCM6328_GROUP(gpio2),
- BCM6328_GROUP(gpio3),
- BCM6328_GROUP(gpio4),
- BCM6328_GROUP(gpio5),
- BCM6328_GROUP(gpio6),
- BCM6328_GROUP(gpio7),
- BCM6328_GROUP(gpio8),
- BCM6328_GROUP(gpio9),
- BCM6328_GROUP(gpio10),
- BCM6328_GROUP(gpio11),
- BCM6328_GROUP(gpio12),
- BCM6328_GROUP(gpio13),
- BCM6328_GROUP(gpio14),
- BCM6328_GROUP(gpio15),
- BCM6328_GROUP(gpio16),
- BCM6328_GROUP(gpio17),
- BCM6328_GROUP(gpio18),
- BCM6328_GROUP(gpio19),
- BCM6328_GROUP(gpio20),
- BCM6328_GROUP(gpio21),
- BCM6328_GROUP(gpio22),
- BCM6328_GROUP(gpio23),
- BCM6328_GROUP(gpio24),
- BCM6328_GROUP(gpio25),
- BCM6328_GROUP(gpio26),
- BCM6328_GROUP(gpio27),
- BCM6328_GROUP(gpio28),
- BCM6328_GROUP(gpio29),
- BCM6328_GROUP(gpio30),
- BCM6328_GROUP(gpio31),
-
- BCM6328_GROUP(hsspi_cs1),
- BCM6328_GROUP(usb_port1),
+static struct pingroup bcm6328_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+
+ BCM_PIN_GROUP(hsspi_cs1),
+ BCM_PIN_GROUP(usb_port1),
};
/* GPIO_MODE */
@@ -292,10 +279,10 @@ static const char *bcm6328_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6328_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6328_groups[group].pins;
- *num_pins = bcm6328_groups[group].num_pins;
+ *npins = bcm6328_groups[group].npins;
return 0;
}
@@ -338,7 +325,7 @@ static int bcm6328_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm6328_pingroup *pg = &bcm6328_groups[group];
+ const struct pingroup *pg = &bcm6328_groups[group];
const struct bcm6328_function *f = &bcm6328_funcs[selector];
bcm6328_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6358.c b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
index 9f6cd7447887..891de49d76e7 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6358.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
@@ -35,9 +35,7 @@
#define BCM6358_MODE_MUX_SYS_IRQ BIT(15)
struct bcm6358_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
+ struct pingroup grp;
const uint16_t mode_val;
@@ -131,9 +129,7 @@ static unsigned sys_irq_grp_pins[] = { 5 };
#define BCM6358_GPIO_MUX_GROUP(n, bit, dir) \
{ \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
+ .grp = BCM_PIN_GROUP(n), \
.mode_val = BCM6358_MODE_MUX_##bit, \
.direction = dir, \
}
@@ -219,15 +215,15 @@ static int bcm6358_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
static const char *bcm6358_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
unsigned group)
{
- return bcm6358_groups[group].name;
+ return bcm6358_groups[group].grp.name;
}
static int bcm6358_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
- *pins = bcm6358_groups[group].pins;
- *num_pins = bcm6358_groups[group].num_pins;
+ *pins = bcm6358_groups[group].grp.pins;
+ *npins = bcm6358_groups[group].grp.npins;
return 0;
}
@@ -264,12 +260,12 @@ static int bcm6358_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned int mask = val;
unsigned pin;
- for (pin = 0; pin < pg->num_pins; pin++)
+ for (pin = 0; pin < pg->grp.npins; pin++)
mask |= (unsigned long)bcm6358_pins[pin].drv_data;
regmap_field_update_bits(priv->overlays, mask, val);
- for (pin = 0; pin < pg->num_pins; pin++) {
+ for (pin = 0; pin < pg->grp.npins; pin++) {
struct pinctrl_gpio_range *range;
unsigned int hw_gpio = bcm6358_pins[pin].number;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6362.c b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
index 13c7230949b2..d9ba1b6c2aeb 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6362.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
@@ -35,12 +35,6 @@ enum bcm6362_pinctrl_reg {
BCM6362_BASEMODE,
};
-struct bcm6362_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6362_function {
const char *name;
const char * const *groups;
@@ -162,63 +156,56 @@ static unsigned nand_grp_pins[] = {
18, 19, 20, 21, 22, 23, 27,
};
-#define BCM6362_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6362_pingroup bcm6362_groups[] = {
- BCM6362_GROUP(gpio0),
- BCM6362_GROUP(gpio1),
- BCM6362_GROUP(gpio2),
- BCM6362_GROUP(gpio3),
- BCM6362_GROUP(gpio4),
- BCM6362_GROUP(gpio5),
- BCM6362_GROUP(gpio6),
- BCM6362_GROUP(gpio7),
- BCM6362_GROUP(gpio8),
- BCM6362_GROUP(gpio9),
- BCM6362_GROUP(gpio10),
- BCM6362_GROUP(gpio11),
- BCM6362_GROUP(gpio12),
- BCM6362_GROUP(gpio13),
- BCM6362_GROUP(gpio14),
- BCM6362_GROUP(gpio15),
- BCM6362_GROUP(gpio16),
- BCM6362_GROUP(gpio17),
- BCM6362_GROUP(gpio18),
- BCM6362_GROUP(gpio19),
- BCM6362_GROUP(gpio20),
- BCM6362_GROUP(gpio21),
- BCM6362_GROUP(gpio22),
- BCM6362_GROUP(gpio23),
- BCM6362_GROUP(gpio24),
- BCM6362_GROUP(gpio25),
- BCM6362_GROUP(gpio26),
- BCM6362_GROUP(gpio27),
- BCM6362_GROUP(gpio28),
- BCM6362_GROUP(gpio29),
- BCM6362_GROUP(gpio30),
- BCM6362_GROUP(gpio31),
- BCM6362_GROUP(gpio32),
- BCM6362_GROUP(gpio33),
- BCM6362_GROUP(gpio34),
- BCM6362_GROUP(gpio35),
- BCM6362_GROUP(gpio36),
- BCM6362_GROUP(gpio37),
- BCM6362_GROUP(gpio38),
- BCM6362_GROUP(gpio39),
- BCM6362_GROUP(gpio40),
- BCM6362_GROUP(gpio41),
- BCM6362_GROUP(gpio42),
- BCM6362_GROUP(gpio43),
- BCM6362_GROUP(gpio44),
- BCM6362_GROUP(gpio45),
- BCM6362_GROUP(gpio46),
- BCM6362_GROUP(gpio47),
- BCM6362_GROUP(nand_grp),
+static struct pingroup bcm6362_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(gpio32),
+ BCM_PIN_GROUP(gpio33),
+ BCM_PIN_GROUP(gpio34),
+ BCM_PIN_GROUP(gpio35),
+ BCM_PIN_GROUP(gpio36),
+ BCM_PIN_GROUP(gpio37),
+ BCM_PIN_GROUP(gpio38),
+ BCM_PIN_GROUP(gpio39),
+ BCM_PIN_GROUP(gpio40),
+ BCM_PIN_GROUP(gpio41),
+ BCM_PIN_GROUP(gpio42),
+ BCM_PIN_GROUP(gpio43),
+ BCM_PIN_GROUP(gpio44),
+ BCM_PIN_GROUP(gpio45),
+ BCM_PIN_GROUP(gpio46),
+ BCM_PIN_GROUP(gpio47),
+ BCM_PIN_GROUP(nand_grp),
};
static const char * const led_groups[] = {
@@ -463,10 +450,10 @@ static const char *bcm6362_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6362_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6362_groups[group].pins;
- *num_pins = bcm6362_groups[group].num_pins;
+ *npins = bcm6362_groups[group].npins;
return 0;
}
@@ -519,13 +506,13 @@ static int bcm6362_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm6362_pingroup *pg = &bcm6362_groups[group];
+ const struct pingroup *pg = &bcm6362_groups[group];
const struct bcm6362_function *f = &bcm6362_funcs[selector];
unsigned i;
unsigned int reg;
unsigned int val, mask;
- for (i = 0; i < pg->num_pins; i++)
+ for (i = 0; i < pg->npins; i++)
bcm6362_set_gpio(pc, pg->pins[i]);
switch (f->reg) {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6368.c b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
index b33a74aec82b..6208467ba6f9 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6368.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
@@ -26,12 +26,6 @@
#define BCM6368_BASEMODE_GPIO 0x0
#define BCM6368_BASEMODE_UART1 0x1
-struct bcm6368_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6368_function {
const char *name;
const char * const *groups;
@@ -127,47 +121,40 @@ static unsigned gpio30_pins[] = { 30 };
static unsigned gpio31_pins[] = { 31 };
static unsigned uart1_grp_pins[] = { 30, 31, 32, 33 };
-#define BCM6368_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6368_pingroup bcm6368_groups[] = {
- BCM6368_GROUP(gpio0),
- BCM6368_GROUP(gpio1),
- BCM6368_GROUP(gpio2),
- BCM6368_GROUP(gpio3),
- BCM6368_GROUP(gpio4),
- BCM6368_GROUP(gpio5),
- BCM6368_GROUP(gpio6),
- BCM6368_GROUP(gpio7),
- BCM6368_GROUP(gpio8),
- BCM6368_GROUP(gpio9),
- BCM6368_GROUP(gpio10),
- BCM6368_GROUP(gpio11),
- BCM6368_GROUP(gpio12),
- BCM6368_GROUP(gpio13),
- BCM6368_GROUP(gpio14),
- BCM6368_GROUP(gpio15),
- BCM6368_GROUP(gpio16),
- BCM6368_GROUP(gpio17),
- BCM6368_GROUP(gpio18),
- BCM6368_GROUP(gpio19),
- BCM6368_GROUP(gpio20),
- BCM6368_GROUP(gpio21),
- BCM6368_GROUP(gpio22),
- BCM6368_GROUP(gpio23),
- BCM6368_GROUP(gpio24),
- BCM6368_GROUP(gpio25),
- BCM6368_GROUP(gpio26),
- BCM6368_GROUP(gpio27),
- BCM6368_GROUP(gpio28),
- BCM6368_GROUP(gpio29),
- BCM6368_GROUP(gpio30),
- BCM6368_GROUP(gpio31),
- BCM6368_GROUP(uart1_grp),
+static struct pingroup bcm6368_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(uart1_grp),
};
static const char * const analog_afe_0_groups[] = {
@@ -358,10 +345,10 @@ static const char *bcm6368_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6368_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6368_groups[group].pins;
- *num_pins = bcm6368_groups[group].num_pins;
+ *npins = bcm6368_groups[group].npins;
return 0;
}
@@ -393,14 +380,14 @@ static int bcm6368_pinctrl_set_mux(struct pinctrl_dev *pctldev,
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
struct bcm6368_priv *priv = pc->driver_data;
- const struct bcm6368_pingroup *pg = &bcm6368_groups[group];
+ const struct pingroup *pg = &bcm6368_groups[group];
const struct bcm6368_function *fun = &bcm6368_funcs[selector];
int i, pin;
if (fun->basemode) {
unsigned int mask = 0;
- for (i = 0; i < pg->num_pins; i++) {
+ for (i = 0; i < pg->npins; i++) {
pin = pg->pins[i];
if (pin < BCM63XX_BANK_GPIOS)
mask |= BIT(pin);
@@ -419,7 +406,7 @@ static int bcm6368_pinctrl_set_mux(struct pinctrl_dev *pctldev,
BIT(pin));
}
- for (pin = 0; pin < pg->num_pins; pin++) {
+ for (pin = 0; pin < pg->npins; pin++) {
struct pinctrl_gpio_range *range;
int hw_gpio = bcm6368_pins[pin].number;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63xx.h b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
index d58c8cd5b6b8..95243027ecd9 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
@@ -21,6 +21,8 @@ struct bcm63xx_pinctrl_soc {
unsigned int ngpios;
};
+#define BCM_PIN_GROUP(n) PINCTRL_PINGROUP(#n, n##_pins, ARRAY_SIZE(n##_pins))
+
struct bcm63xx_pinctrl {
struct device *dev;
struct regmap *regs;
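For reference: BCM_PIN_GROUP builds on the generic pingroup helpers from
<linux/pinctrl/pinctrl.h>. A minimal sketch of the expansion, with field
names matching the .grp.name/.pins/.npins accessors used in the hunks
above (illustrative, not the verbatim kernel definition):

struct pingroup {
	const char *name;		/* group name, e.g. "gpio0" */
	const unsigned int *pins;	/* array of pin numbers */
	size_t npins;			/* number of entries in pins[] */
};

/* BCM_PIN_GROUP(gpio0) expands, via PINCTRL_PINGROUP(), to roughly: */
{ .name = "gpio0", .pins = gpio0_pins, .npins = ARRAY_SIZE(gpio0_pins) }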
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index 65a86543c58c..465cc96814a1 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -233,10 +233,8 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"cru_gpio_control");
ns_pinctrl->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ns_pinctrl->base)) {
- dev_err(dev, "Failed to map pinctrl regs\n");
+ if (IS_ERR(ns_pinctrl->base))
return PTR_ERR(ns_pinctrl->base);
- }
memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
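Note: the dev_err() can go because devm_ioremap_resource() already logs a
diagnostic on failure. The lookup-plus-map pair can usually be collapsed
further; a minimal sketch, assuming devm_platform_ioremap_resource_byname()
is available in this tree:

ns_pinctrl->base = devm_platform_ioremap_resource_byname(pdev,
							 "cru_gpio_control");
if (IS_ERR(ns_pinctrl->base))
	return PTR_ERR(ns_pinctrl->base);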
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index a073eedd71aa..1e427ea4d31b 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -209,7 +209,7 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
for (i = 0; i < pctrl->desc->ngroups; i++) {
desc_group = pctrl->desc->groups + i;
- /* compute the maxiumum number of functions a group can have */
+ /* compute the maximum number of functions a group can have */
max_functions += 1 << (desc_group->bit_width + 1);
}
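Concretely: a group whose mux field has bit_width = 2 contributes
1 << (2 + 1) = 8 possible function values to max_functions.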
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index d96b1130efd3..7a32f77792d9 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -119,28 +119,32 @@ config PINCTRL_IMX7ULP
config PINCTRL_IMX8MM
tristate "IMX8MM pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mm pinctrl driver
config PINCTRL_IMX8MN
tristate "IMX8MN pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mn pinctrl driver
config PINCTRL_IMX8MP
tristate "IMX8MP pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mp pinctrl driver
config PINCTRL_IMX8MQ
tristate "IMX8MQ pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mq pinctrl driver
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1600a2c18eee..fed02c6fea06 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -162,6 +162,18 @@ config PINCTRL_MT8186
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8188
+ bool "MediaTek MT8188 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+ help
+	  Say yes here to support the pin controller and GPIO driver
+	  on the MediaTek MT8188 SoC.
+	  On MTK platforms, virtual GPIOs are supported and used to
+	  map specific EINTs that have no real GPIO pin.
+
config PINCTRL_MT8192
bool "Mediatek MT8192 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index c8f226ae36c9..53265404a39d 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
obj-$(CONFIG_PINCTRL_MT8186) += pinctrl-mt8186.o
+obj-$(CONFIG_PINCTRL_MT8188) += pinctrl-mt8188.o
obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o
obj-$(CONFIG_PINCTRL_MT8365) += pinctrl-mt8365.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8188.c b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
new file mode 100644
index 000000000000..d0e75c1b4417
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
@@ -0,0 +1,1673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ *
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt8188.h"
+#include "pinctrl-paris.h"
+
+/* MT8188 has multiple bases for programming pin configuration, listed below:
+ * iocfg[0]:0x10005000, iocfg[1]:0x11c00000, iocfg[2]:0x11e10000,
+ * iocfg[3]:0x11e20000, iocfg[4]:0x11ea0000
+ * The i_base argument indicates which base a pin is mapped into.
+ */
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
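+/* An illustrative reading of the descriptor arguments: pins
+ * s_pin..e_pin share one field layout; i_base selects an iocfg base
+ * from the list above; s_addr is the first register and x_addrs the
+ * stride to the next; s_bit and x_bits give the first bit and the
+ * field width. For example, in mt8188_pin_smt_range below,
+ * PIN_FIELD_BASE(12, 12, 2, 0x0160, 0x10, 12, 1) places the
+ * Schmitt-trigger control for pin 12 at bit 12 of register 0x0160 in
+ * iocfg[2] (0x11e10000).
+ */
+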
+static const struct mtk_pin_field_calc mt8188_pin_mode_range[] = {
+ PIN_FIELD(0, 177, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_dir_range[] = {
+ PIN_FIELD(0, 177, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_di_range[] = {
+ PIN_FIELD(0, 177, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_do_range[] = {
+ PIN_FIELD(0, 177, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0170, 0x10, 8, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x0170, 0x10, 9, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x0170, 0x10, 10, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x0170, 0x10, 11, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x0170, 0x10, 12, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x0170, 0x10, 13, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x0170, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x0170, 0x10, 15, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x0160, 0x10, 12, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x0160, 0x10, 13, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x0160, 0x10, 14, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x0160, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(42, 42, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0170, 0x10, 25, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0170, 0x10, 28, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x0160, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x0160, 0x10, 31, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x0170, 0x10, 26, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x0170, 0x10, 29, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x0170, 0x10, 27, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x0170, 0x10, 30, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x0160, 0x10, 30, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x0170, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x0180, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x0170, 0x10, 31, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x0180, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x0180, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x0180, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x0180, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x0180, 0x10, 6, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0180, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x0180, 0x10, 8, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x0180, 0x10, 7, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x0160, 0x10, 27, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x0160, 0x10, 27, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0160, 0x10, 27, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0160, 0x10, 16, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0170, 0x10, 0, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0170, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0170, 0x10, 6, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0170, 0x10, 7, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0170, 0x10, 2, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0170, 0x10, 3, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0170, 0x10, 4, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0170, 0x10, 5, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x0160, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x0160, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x0160, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x0160, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x0160, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x0160, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0160, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0160, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0160, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x0160, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x0160, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x0160, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x0160, 0x10, 28, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x0160, 0x10, 28, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x0080, 0x10, 28, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x0090, 0x10, 31, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0090, 0x10, 30, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(42, 42, 2, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x0090, 0x10, 29, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x0080, 0x10, 28, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_tdsel_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(1, 1, 1, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(2, 2, 1, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(3, 3, 1, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(4, 4, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(5, 5, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(6, 6, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(7, 7, 1, 0x01b0, 0x10, 16, 4),
+ PIN_FIELD_BASE(8, 8, 1, 0x01b0, 0x10, 20, 4),
+ PIN_FIELD_BASE(9, 9, 1, 0x01b0, 0x10, 24, 4),
+ PIN_FIELD_BASE(10, 10, 1, 0x01b0, 0x10, 28, 4),
+ PIN_FIELD_BASE(11, 11, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(12, 12, 2, 0x0190, 0x10, 16, 4),
+ PIN_FIELD_BASE(13, 13, 2, 0x0190, 0x10, 20, 4),
+ PIN_FIELD_BASE(14, 14, 2, 0x0190, 0x10, 24, 4),
+ PIN_FIELD_BASE(15, 15, 2, 0x0190, 0x10, 28, 4),
+ PIN_FIELD_BASE(16, 16, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(17, 17, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(18, 18, 4, 0x0110, 0x10, 4, 4),
+ PIN_FIELD_BASE(19, 19, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(20, 20, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(21, 21, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(22, 22, 4, 0x0100, 0x10, 0, 4),
+ PIN_FIELD_BASE(23, 23, 4, 0x0100, 0x10, 4, 4),
+ PIN_FIELD_BASE(24, 24, 4, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(25, 25, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(26, 26, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(27, 27, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(28, 28, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(29, 29, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(30, 30, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(31, 31, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(32, 32, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(33, 33, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(34, 34, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(35, 35, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(36, 36, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(37, 37, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(38, 38, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(39, 39, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(40, 40, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(41, 41, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(42, 42, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(43, 43, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(44, 44, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(45, 45, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(46, 46, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(47, 47, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(48, 48, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(49, 49, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(50, 50, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(51, 51, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(52, 52, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(53, 53, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(54, 54, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(55, 55, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(56, 56, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(57, 57, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(58, 58, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(59, 59, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(60, 60, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(61, 61, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(62, 62, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(63, 63, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(64, 64, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(65, 65, 4, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(66, 66, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(67, 67, 4, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(68, 68, 4, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(69, 69, 1, 0x01d0, 0x10, 16, 4),
+ PIN_FIELD_BASE(70, 70, 1, 0x01d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(71, 71, 1, 0x01e0, 0x10, 0, 4),
+ PIN_FIELD_BASE(72, 72, 1, 0x01d0, 0x10, 28, 4),
+ PIN_FIELD_BASE(73, 73, 1, 0x01d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(74, 74, 1, 0x01d0, 0x10, 24, 4),
+ PIN_FIELD_BASE(75, 75, 1, 0x01e0, 0x10, 8, 4),
+ PIN_FIELD_BASE(76, 76, 1, 0x01e0, 0x10, 4, 4),
+ PIN_FIELD_BASE(77, 77, 1, 0x01e0, 0x10, 16, 4),
+ PIN_FIELD_BASE(78, 78, 1, 0x01e0, 0x10, 12, 4),
+ PIN_FIELD_BASE(79, 79, 4, 0x0110, 0x10, 20, 4),
+ PIN_FIELD_BASE(80, 80, 4, 0x0110, 0x10, 16, 4),
+ PIN_FIELD_BASE(81, 81, 4, 0x0110, 0x10, 28, 4),
+ PIN_FIELD_BASE(82, 82, 4, 0x0110, 0x10, 24, 4),
+ PIN_FIELD_BASE(83, 83, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(84, 84, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(85, 85, 2, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(86, 86, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(87, 87, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(88, 88, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(89, 89, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(90, 90, 2, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(91, 91, 2, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(92, 92, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(93, 93, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(94, 94, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(95, 95, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(96, 96, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(97, 97, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(98, 98, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(99, 99, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(100, 100, 2, 0x01b0, 0x10, 20, 4),
+ PIN_FIELD_BASE(101, 101, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(102, 102, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(103, 103, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(104, 104, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(105, 105, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(106, 106, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(107, 107, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(108, 108, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(109, 109, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(110, 110, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(111, 111, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(112, 112, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(113, 113, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(114, 114, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(115, 115, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(116, 116, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(117, 117, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(118, 118, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(119, 119, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(120, 120, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(121, 121, 3, 0x00f0, 0x10, 24, 4),
+ PIN_FIELD_BASE(122, 122, 3, 0x0100, 0x10, 4, 4),
+ PIN_FIELD_BASE(123, 123, 3, 0x0100, 0x10, 0, 4),
+ PIN_FIELD_BASE(124, 124, 3, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(125, 125, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(126, 126, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(127, 127, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(128, 128, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(129, 129, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(130, 130, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(131, 131, 1, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(132, 132, 1, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(133, 133, 1, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(134, 134, 1, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(135, 135, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(136, 136, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(137, 137, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(138, 138, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(139, 139, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(140, 140, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(141, 141, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(142, 142, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(143, 143, 1, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(144, 144, 1, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(145, 145, 1, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(146, 146, 1, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(147, 147, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(148, 148, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(149, 149, 1, 0x01c0, 0x10, 4, 4),
+ PIN_FIELD_BASE(150, 150, 1, 0x01c0, 0x10, 4, 4),
+ PIN_FIELD_BASE(151, 151, 2, 0x0190, 0x10, 4, 4),
+ PIN_FIELD_BASE(152, 152, 2, 0x0190, 0x10, 0, 4),
+ PIN_FIELD_BASE(153, 153, 2, 0x0180, 0x10, 28, 4),
+ PIN_FIELD_BASE(154, 154, 2, 0x0180, 0x10, 24, 4),
+ PIN_FIELD_BASE(155, 155, 2, 0x0190, 0x10, 12, 4),
+ PIN_FIELD_BASE(156, 156, 2, 0x0180, 0x10, 4, 4),
+ PIN_FIELD_BASE(157, 157, 2, 0x0180, 0x10, 0, 4),
+ PIN_FIELD_BASE(158, 158, 2, 0x0180, 0x10, 20, 4),
+ PIN_FIELD_BASE(159, 159, 2, 0x0180, 0x10, 16, 4),
+ PIN_FIELD_BASE(160, 160, 2, 0x0180, 0x10, 12, 4),
+ PIN_FIELD_BASE(161, 161, 2, 0x0180, 0x10, 8, 4),
+ PIN_FIELD_BASE(162, 162, 2, 0x0190, 0x10, 8, 4),
+ PIN_FIELD_BASE(163, 163, 4, 0x0100, 0x10, 16, 4),
+ PIN_FIELD_BASE(164, 164, 4, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(165, 165, 4, 0x0100, 0x10, 20, 4),
+ PIN_FIELD_BASE(166, 166, 4, 0x0100, 0x10, 24, 4),
+ PIN_FIELD_BASE(167, 167, 4, 0x0100, 0x10, 28, 4),
+ PIN_FIELD_BASE(168, 168, 4, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(169, 169, 3, 0x00f0, 0x10, 4, 4),
+ PIN_FIELD_BASE(170, 170, 3, 0x00f0, 0x10, 0, 4),
+ PIN_FIELD_BASE(171, 171, 3, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(172, 172, 3, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(173, 173, 3, 0x00f0, 0x10, 16, 4),
+ PIN_FIELD_BASE(174, 174, 3, 0x00f0, 0x10, 20, 4),
+ PIN_FIELD_BASE(175, 175, 2, 0x01b0, 0x10, 16, 4),
+ PIN_FIELD_BASE(176, 176, 2, 0x01b0, 0x10, 16, 4),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_rdsel_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0130, 0x10, 18, 2),
+ PIN_FIELD_BASE(1, 1, 1, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(2, 2, 1, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(3, 3, 1, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(4, 4, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(5, 5, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(6, 6, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(7, 7, 1, 0x0130, 0x10, 26, 2),
+ PIN_FIELD_BASE(8, 8, 1, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(9, 9, 1, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(10, 10, 1, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(11, 11, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(12, 12, 2, 0x0130, 0x10, 12, 2),
+ PIN_FIELD_BASE(13, 13, 2, 0x0130, 0x10, 14, 2),
+ PIN_FIELD_BASE(14, 14, 2, 0x0130, 0x10, 16, 2),
+ PIN_FIELD_BASE(15, 15, 2, 0x0130, 0x10, 18, 2),
+ PIN_FIELD_BASE(16, 16, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(17, 17, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(18, 18, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(19, 19, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(20, 20, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(21, 21, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(22, 22, 4, 0x00b0, 0x10, 0, 2),
+ PIN_FIELD_BASE(23, 23, 4, 0x00b0, 0x10, 2, 2),
+ PIN_FIELD_BASE(24, 24, 4, 0x00b0, 0x10, 4, 2),
+ PIN_FIELD_BASE(25, 25, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(26, 26, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(27, 27, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(28, 28, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(29, 29, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(30, 30, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(31, 31, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(32, 32, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(33, 33, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(34, 34, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(35, 35, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(36, 36, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(37, 37, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(38, 38, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(39, 39, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(40, 40, 1, 0x0140, 0x10, 22, 2),
+ PIN_FIELD_BASE(41, 41, 1, 0x0140, 0x10, 22, 2),
+ PIN_FIELD_BASE(42, 42, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(43, 43, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(44, 44, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(45, 45, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(46, 46, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(47, 47, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(48, 48, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(49, 49, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(50, 50, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(51, 51, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(52, 52, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(53, 53, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(54, 54, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(55, 55, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(56, 56, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(57, 57, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(58, 58, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(59, 59, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(60, 60, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(61, 61, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(62, 62, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(63, 63, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(64, 64, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(65, 65, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(66, 66, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(67, 67, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(68, 68, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(69, 69, 1, 0x0150, 0x10, 14, 2),
+ PIN_FIELD_BASE(70, 70, 1, 0x0150, 0x10, 12, 2),
+ PIN_FIELD_BASE(71, 71, 1, 0x0150, 0x10, 22, 2),
+ PIN_FIELD_BASE(72, 72, 1, 0x0150, 0x10, 20, 2),
+ PIN_FIELD_BASE(73, 73, 1, 0x0150, 0x10, 16, 2),
+ PIN_FIELD_BASE(74, 74, 1, 0x0150, 0x10, 18, 2),
+ PIN_FIELD_BASE(75, 75, 1, 0x0150, 0x10, 26, 2),
+ PIN_FIELD_BASE(76, 76, 1, 0x0150, 0x10, 24, 2),
+ PIN_FIELD_BASE(77, 77, 1, 0x0150, 0x10, 30, 2),
+ PIN_FIELD_BASE(78, 78, 1, 0x0150, 0x10, 28, 2),
+ PIN_FIELD_BASE(79, 79, 4, 0x00c0, 0x10, 18, 2),
+ PIN_FIELD_BASE(80, 80, 4, 0x00c0, 0x10, 16, 2),
+ PIN_FIELD_BASE(81, 81, 4, 0x00c0, 0x10, 22, 2),
+ PIN_FIELD_BASE(82, 82, 4, 0x00c0, 0x10, 20, 2),
+ PIN_FIELD_BASE(83, 83, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(84, 84, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(85, 85, 2, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(86, 86, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(87, 87, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(88, 88, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(89, 89, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(90, 90, 2, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(91, 91, 2, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(92, 92, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(93, 93, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(94, 94, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(95, 95, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(96, 96, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(97, 97, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(98, 98, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(99, 99, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(100, 100, 2, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(101, 101, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(102, 102, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(103, 103, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(104, 104, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(105, 105, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(106, 106, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(107, 107, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(108, 108, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(109, 109, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(110, 110, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(111, 111, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(112, 112, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(113, 113, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(114, 114, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(115, 115, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(116, 116, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(117, 117, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(118, 118, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(119, 119, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(120, 120, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(121, 121, 3, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(122, 122, 3, 0x00b0, 0x10, 12, 2),
+ PIN_FIELD_BASE(123, 123, 3, 0x00b0, 0x10, 10, 2),
+ PIN_FIELD_BASE(124, 124, 3, 0x00b0, 0x10, 8, 2),
+ PIN_FIELD_BASE(125, 125, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(126, 126, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(127, 127, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(128, 128, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(129, 129, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(130, 130, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(131, 131, 1, 0x0120, 0x10, 0, 6),
+ PIN_FIELD_BASE(132, 132, 1, 0x0130, 0x10, 0, 6),
+ PIN_FIELD_BASE(133, 133, 1, 0x0130, 0x10, 6, 6),
+ PIN_FIELD_BASE(134, 134, 1, 0x0130, 0x10, 12, 6),
+ PIN_FIELD_BASE(135, 135, 1, 0x0140, 0x10, 24, 6),
+ PIN_FIELD_BASE(136, 136, 1, 0x0140, 0x10, 24, 6),
+ PIN_FIELD_BASE(137, 137, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(138, 138, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(139, 139, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(140, 140, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(141, 141, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(142, 142, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(143, 143, 1, 0x0120, 0x10, 6, 6),
+ PIN_FIELD_BASE(144, 144, 1, 0x0120, 0x10, 12, 6),
+ PIN_FIELD_BASE(145, 145, 1, 0x0120, 0x10, 18, 6),
+ PIN_FIELD_BASE(146, 146, 1, 0x0120, 0x10, 24, 6),
+ PIN_FIELD_BASE(147, 147, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(148, 148, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(149, 149, 1, 0x0140, 0x10, 4, 6),
+ PIN_FIELD_BASE(150, 150, 1, 0x0140, 0x10, 4, 6),
+ PIN_FIELD_BASE(151, 151, 2, 0x0120, 0x10, 24, 6),
+ PIN_FIELD_BASE(152, 152, 2, 0x0120, 0x10, 18, 6),
+ PIN_FIELD_BASE(153, 153, 2, 0x0120, 0x10, 12, 6),
+ PIN_FIELD_BASE(154, 154, 2, 0x0120, 0x10, 6, 6),
+ PIN_FIELD_BASE(155, 155, 2, 0x0130, 0x10, 6, 6),
+ PIN_FIELD_BASE(156, 156, 2, 0x0110, 0x10, 6, 6),
+ PIN_FIELD_BASE(157, 157, 2, 0x0110, 0x10, 0, 6),
+ PIN_FIELD_BASE(158, 158, 2, 0x0120, 0x10, 0, 6),
+ PIN_FIELD_BASE(159, 159, 2, 0x0110, 0x10, 24, 6),
+ PIN_FIELD_BASE(160, 160, 2, 0x0110, 0x10, 18, 6),
+ PIN_FIELD_BASE(161, 161, 2, 0x0110, 0x10, 12, 6),
+ PIN_FIELD_BASE(162, 162, 2, 0x0130, 0x10, 0, 6),
+ PIN_FIELD_BASE(163, 163, 4, 0x00b0, 0x10, 12, 6),
+ PIN_FIELD_BASE(164, 164, 4, 0x00b0, 0x10, 6, 6),
+ PIN_FIELD_BASE(165, 165, 4, 0x00b0, 0x10, 18, 6),
+ PIN_FIELD_BASE(166, 166, 4, 0x00b0, 0x10, 24, 6),
+ PIN_FIELD_BASE(167, 167, 4, 0x00c0, 0x10, 0, 6),
+ PIN_FIELD_BASE(168, 168, 4, 0x00c0, 0x10, 6, 6),
+ PIN_FIELD_BASE(169, 169, 3, 0x00a0, 0x10, 6, 6),
+ PIN_FIELD_BASE(170, 170, 3, 0x00a0, 0x10, 0, 6),
+ PIN_FIELD_BASE(171, 171, 3, 0x00a0, 0x10, 12, 6),
+ PIN_FIELD_BASE(172, 172, 3, 0x00a0, 0x10, 18, 6),
+ PIN_FIELD_BASE(173, 173, 3, 0x00a0, 0x10, 24, 6),
+ PIN_FIELD_BASE(174, 174, 3, 0x00b0, 0x10, 0, 6),
+ PIN_FIELD_BASE(175, 175, 2, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(176, 176, 2, 0x0140, 0x10, 14, 2),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_pupd_range[] = {
+ PIN_FIELD_BASE(42, 42, 2, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x00d0, 0x10, 18, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x00d0, 0x10, 19, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x00d0, 0x10, 17, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0060, 0x10, 5, 1),
+};
+
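+/* Pins listed in the PUPD/R0/R1 tables (the same pin spans recur in
+ * mt8188_pin_pupd_range and the r0/r1 tables below) use MediaTek's
+ * resistor-selection pull scheme: PUPD sets the pull direction, while
+ * the R0/R1 bits select among the available pull resistors. Other pins
+ * use the plain PU/PD controls.
+ */
+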
+static const struct mtk_pin_field_calc mt8188_pin_r0_range[] = {
+ PIN_FIELD_BASE(42, 42, 2, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0100, 0x10, 1, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0100, 0x10, 2, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0100, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0100, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0100, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0100, 0x10, 14, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0100, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0100, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0100, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0100, 0x10, 5, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0100, 0x10, 18, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0100, 0x10, 19, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0100, 0x10, 17, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0100, 0x10, 0, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0080, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_r1_range[] = {
+ PIN_FIELD_BASE(42, 42, 2, 0x0100, 0x10, 12, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x0100, 0x10, 13, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x0100, 0x10, 14, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x0100, 0x10, 15, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0110, 0x10, 1, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0110, 0x10, 2, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0110, 0x10, 9, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0110, 0x10, 10, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0110, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0110, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0110, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0110, 0x10, 14, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0110, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0110, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0110, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0110, 0x10, 4, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0110, 0x10, 5, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0110, 0x10, 6, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0110, 0x10, 7, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0110, 0x10, 8, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0110, 0x10, 18, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0110, 0x10, 19, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0110, 0x10, 0, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x0100, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x0100, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0100, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0100, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x0100, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x0100, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0090, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x00e0, 0x10, 25, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x00e0, 0x10, 24, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x00e0, 0x10, 26, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x00e0, 0x10, 29, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x00e0, 0x10, 27, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x00e0, 0x10, 30, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x00e0, 0x10, 28, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00d0, 0x10, 31, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x00d0, 0x10, 30, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x00d0, 0x10, 21, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x00d0, 0x10, 22, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x00d0, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x00d0, 0x10, 18, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x00d0, 0x10, 19, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x00d0, 0x10, 20, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x00d0, 0x10, 28, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00d0, 0x10, 23, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00d0, 0x10, 29, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00d0, 0x10, 24, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00d0, 0x10, 25, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00d0, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00d0, 0x10, 27, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x00e0, 0x10, 12, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x00b0, 0x10, 25, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x00b0, 0x10, 24, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x00b0, 0x10, 23, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x00b0, 0x10, 26, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x00b0, 0x10, 29, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x00b0, 0x10, 27, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x00b0, 0x10, 30, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x00b0, 0x10, 28, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x00b0, 0x10, 31, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00b0, 0x10, 23, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00a0, 0x10, 31, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x00a0, 0x10, 30, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x00a0, 0x10, 28, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00a0, 0x10, 29, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x00b0, 0x10, 12, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(1, 1, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(2, 2, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(3, 3, 1, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(4, 4, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(5, 5, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(6, 6, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(7, 7, 1, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(8, 8, 1, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(9, 9, 1, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(10, 10, 1, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(11, 11, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(12, 12, 2, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(13, 13, 2, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(14, 14, 2, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(15, 15, 2, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(16, 16, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(17, 17, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(18, 18, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(19, 19, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(20, 20, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(21, 21, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(22, 22, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(23, 23, 4, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(24, 24, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(25, 25, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(26, 26, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(27, 27, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(28, 28, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(29, 29, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(30, 30, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(31, 31, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(32, 32, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(33, 33, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(36, 36, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(37, 37, 1, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(38, 38, 1, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(39, 39, 1, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(40, 40, 1, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(41, 41, 1, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(42, 42, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(43, 43, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(44, 44, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(45, 45, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(46, 46, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(47, 47, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(48, 48, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(50, 50, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(52, 52, 3, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(53, 53, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(55, 55, 1, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0030, 0x10, 18, 3),
+ PIN_FIELD_BASE(65, 65, 4, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 4, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 4, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(69, 69, 1, 0x0030, 0x10, 18, 3),
+ PIN_FIELD_BASE(70, 70, 1, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(71, 71, 1, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(72, 72, 1, 0x0030, 0x10, 27, 3),
+ PIN_FIELD_BASE(73, 73, 1, 0x0030, 0x10, 21, 3),
+ PIN_FIELD_BASE(74, 74, 1, 0x0030, 0x10, 24, 3),
+ PIN_FIELD_BASE(75, 75, 1, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(76, 76, 1, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(77, 77, 1, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(78, 78, 1, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(79, 79, 4, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(80, 80, 4, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(81, 81, 4, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(82, 82, 4, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(83, 83, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(84, 84, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(85, 85, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(86, 86, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(87, 87, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(88, 88, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(89, 89, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(90, 90, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(91, 91, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(92, 92, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(93, 93, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(94, 94, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(95, 95, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(96, 96, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(97, 97, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(98, 98, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(99, 99, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(100, 100, 2, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(101, 101, 2, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(102, 102, 2, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(103, 103, 2, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(104, 104, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(105, 105, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(106, 106, 2, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(107, 107, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(108, 108, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(109, 109, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(110, 110, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(111, 111, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(112, 112, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(113, 113, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(114, 114, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(115, 115, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(116, 116, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(117, 117, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(118, 118, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(119, 119, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(120, 120, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(121, 121, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 3, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(124, 124, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(125, 125, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(126, 126, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(127, 127, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(128, 128, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(129, 129, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(130, 130, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(131, 131, 1, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(132, 132, 1, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(133, 133, 1, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(134, 134, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(135, 135, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(136, 136, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(137, 137, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(138, 138, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(139, 139, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(140, 140, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(141, 141, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(142, 142, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(143, 143, 1, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(144, 144, 1, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(145, 145, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(146, 146, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(147, 147, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(148, 148, 1, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(149, 149, 1, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(150, 150, 1, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(151, 151, 2, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(152, 152, 2, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(153, 153, 2, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(154, 154, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(155, 155, 2, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(156, 156, 2, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(157, 157, 2, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(158, 158, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(159, 159, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(160, 160, 2, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(161, 161, 2, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(162, 162, 2, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(163, 163, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(164, 164, 4, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(165, 165, 4, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(166, 166, 4, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(167, 167, 4, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(168, 168, 4, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(169, 169, 3, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(171, 171, 3, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(172, 172, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(173, 173, 3, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(174, 174, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(175, 175, 2, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(176, 176, 2, 0x0030, 0x10, 3, 3),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(53, 53, 3, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(55, 55, 1, 0x0060, 0x10, 0, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x0060, 0x10, 9, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0050, 0x10, 6, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x0060, 0x10, 3, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x0060, 0x10, 12, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x0060, 0x10, 6, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x0060, 0x10, 15, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0050, 0x10, 9, 3),
+ PIN_FIELD_BASE(65, 65, 4, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 4, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 4, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 4, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(175, 175, 2, 0x0050, 0x10, 12, 3),
+ PIN_FIELD_BASE(176, 176, 2, 0x0050, 0x10, 15, 3),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_rsel_range[] = {
+ PIN_FIELD_BASE(53, 53, 3, 0x00c0, 0x10, 0, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x00c0, 0x10, 3, 3),
+ PIN_FIELD_BASE(55, 55, 1, 0x0160, 0x10, 0, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x0160, 0x10, 9, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0150, 0x10, 0, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0150, 0x10, 6, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x0160, 0x10, 3, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x0160, 0x10, 12, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x0160, 0x10, 6, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x0160, 0x10, 15, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0150, 0x10, 3, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0150, 0x10, 9, 3),
+ PIN_FIELD_BASE(65, 65, 4, 0x00d0, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 4, 0x00d0, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 4, 0x00d0, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 4, 0x00d0, 0x10, 9, 3),
+ PIN_FIELD_BASE(175, 175, 2, 0x0150, 0x10, 12, 3),
+ PIN_FIELD_BASE(176, 176, 2, 0x0150, 0x10, 15, 3),
+};
+
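+/*
+ * Each PIN_RSEL() entry pairs one RSEL register code with the pull-up and
+ * pull-down resistance it is assumed to select, in ohms; code 0x0 on pins
+ * 53-68, for instance, gives 75 kohm in both directions.
+ */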
+static const struct mtk_pin_rsel mt8188_pin_rsel_val_range[] = {
+ PIN_RSEL(53, 68, 0x0, 75000, 75000),
+ PIN_RSEL(53, 68, 0x1, 10000, 5000),
+ PIN_RSEL(53, 68, 0x2, 5000, 75000),
+ PIN_RSEL(53, 68, 0x3, 4000, 5000),
+ PIN_RSEL(53, 68, 0x4, 3000, 75000),
+ PIN_RSEL(53, 68, 0x5, 2000, 5000),
+ PIN_RSEL(53, 68, 0x6, 1500, 75000),
+ PIN_RSEL(53, 68, 0x7, 1000, 5000),
+ PIN_RSEL(175, 176, 0x0, 75000, 75000),
+ PIN_RSEL(175, 176, 0x1, 10000, 5000),
+ PIN_RSEL(175, 176, 0x2, 5000, 75000),
+ PIN_RSEL(175, 176, 0x3, 4000, 5000),
+ PIN_RSEL(175, 176, 0x4, 3000, 75000),
+ PIN_RSEL(175, 176, 0x5, 2000, 5000),
+ PIN_RSEL(175, 176, 0x6, 1500, 75000),
+ PIN_RSEL(175, 176, 0x7, 1000, 5000),
+};
+
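+/*
+ * One entry per pin, telling the common mtk_pinconf_bias_{set,get}_combo()
+ * helpers which bias scheme the pin uses: plain PU/PD bits, PUPD with the
+ * R1/R0 resistor pair, or PU/PD combined with the RSEL codes above.
+ */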
+static const unsigned int mt8188_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE, /*0*/
+ MTK_PULL_PU_PD_TYPE, /*1*/
+ MTK_PULL_PU_PD_TYPE, /*2*/
+ MTK_PULL_PU_PD_TYPE, /*3*/
+ MTK_PULL_PU_PD_TYPE, /*4*/
+ MTK_PULL_PU_PD_TYPE, /*5*/
+ MTK_PULL_PU_PD_TYPE, /*6*/
+ MTK_PULL_PU_PD_TYPE, /*7*/
+ MTK_PULL_PU_PD_TYPE, /*8*/
+ MTK_PULL_PU_PD_TYPE, /*9*/
+ MTK_PULL_PU_PD_TYPE, /*10*/
+ MTK_PULL_PU_PD_TYPE, /*11*/
+ MTK_PULL_PU_PD_TYPE, /*12*/
+ MTK_PULL_PU_PD_TYPE, /*13*/
+ MTK_PULL_PU_PD_TYPE, /*14*/
+ MTK_PULL_PU_PD_TYPE, /*15*/
+ MTK_PULL_PU_PD_TYPE, /*16*/
+ MTK_PULL_PU_PD_TYPE, /*17*/
+ MTK_PULL_PU_PD_TYPE, /*18*/
+ MTK_PULL_PU_PD_TYPE, /*19*/
+ MTK_PULL_PU_PD_TYPE, /*20*/
+ MTK_PULL_PU_PD_TYPE, /*21*/
+ MTK_PULL_PU_PD_TYPE, /*22*/
+ MTK_PULL_PU_PD_TYPE, /*23*/
+ MTK_PULL_PU_PD_TYPE, /*24*/
+ MTK_PULL_PU_PD_TYPE, /*25*/
+ MTK_PULL_PU_PD_TYPE, /*26*/
+ MTK_PULL_PU_PD_TYPE, /*27*/
+ MTK_PULL_PU_PD_TYPE, /*28*/
+ MTK_PULL_PU_PD_TYPE, /*29*/
+ MTK_PULL_PU_PD_TYPE, /*30*/
+ MTK_PULL_PU_PD_TYPE, /*31*/
+ MTK_PULL_PU_PD_TYPE, /*32*/
+ MTK_PULL_PU_PD_TYPE, /*33*/
+ MTK_PULL_PU_PD_TYPE, /*34*/
+ MTK_PULL_PU_PD_TYPE, /*35*/
+ MTK_PULL_PU_PD_TYPE, /*36*/
+ MTK_PULL_PU_PD_TYPE, /*37*/
+ MTK_PULL_PU_PD_TYPE, /*38*/
+ MTK_PULL_PU_PD_TYPE, /*39*/
+ MTK_PULL_PU_PD_TYPE, /*40*/
+ MTK_PULL_PU_PD_TYPE, /*41*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*42*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*43*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*44*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*45*/
+ MTK_PULL_PU_PD_TYPE, /*46*/
+ MTK_PULL_PU_PD_TYPE, /*47*/
+ MTK_PULL_PU_PD_TYPE, /*48*/
+ MTK_PULL_PU_PD_TYPE, /*49*/
+ MTK_PULL_PU_PD_TYPE, /*50*/
+ MTK_PULL_PU_PD_TYPE, /*51*/
+ MTK_PULL_PU_PD_TYPE, /*52*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*53*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*54*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*55*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*56*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*57*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*58*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*59*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*60*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*61*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*62*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*63*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*64*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*65*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*66*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*67*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*68*/
+ MTK_PULL_PU_PD_TYPE, /*69*/
+ MTK_PULL_PU_PD_TYPE, /*70*/
+ MTK_PULL_PU_PD_TYPE, /*71*/
+ MTK_PULL_PU_PD_TYPE, /*72*/
+ MTK_PULL_PU_PD_TYPE, /*73*/
+ MTK_PULL_PU_PD_TYPE, /*74*/
+ MTK_PULL_PU_PD_TYPE, /*75*/
+ MTK_PULL_PU_PD_TYPE, /*76*/
+ MTK_PULL_PU_PD_TYPE, /*77*/
+ MTK_PULL_PU_PD_TYPE, /*78*/
+ MTK_PULL_PU_PD_TYPE, /*79*/
+ MTK_PULL_PU_PD_TYPE, /*80*/
+ MTK_PULL_PU_PD_TYPE, /*81*/
+ MTK_PULL_PU_PD_TYPE, /*82*/
+ MTK_PULL_PU_PD_TYPE, /*83*/
+ MTK_PULL_PU_PD_TYPE, /*84*/
+ MTK_PULL_PU_PD_TYPE, /*85*/
+ MTK_PULL_PU_PD_TYPE, /*86*/
+ MTK_PULL_PU_PD_TYPE, /*87*/
+ MTK_PULL_PU_PD_TYPE, /*88*/
+ MTK_PULL_PU_PD_TYPE, /*89*/
+ MTK_PULL_PU_PD_TYPE, /*90*/
+ MTK_PULL_PU_PD_TYPE, /*91*/
+ MTK_PULL_PU_PD_TYPE, /*92*/
+ MTK_PULL_PU_PD_TYPE, /*93*/
+ MTK_PULL_PU_PD_TYPE, /*94*/
+ MTK_PULL_PU_PD_TYPE, /*95*/
+ MTK_PULL_PU_PD_TYPE, /*96*/
+ MTK_PULL_PU_PD_TYPE, /*97*/
+ MTK_PULL_PU_PD_TYPE, /*98*/
+ MTK_PULL_PU_PD_TYPE, /*99*/
+ MTK_PULL_PU_PD_TYPE, /*100*/
+ MTK_PULL_PU_PD_TYPE, /*101*/
+ MTK_PULL_PU_PD_TYPE, /*102*/
+ MTK_PULL_PU_PD_TYPE, /*103*/
+ MTK_PULL_PU_PD_TYPE, /*104*/
+ MTK_PULL_PU_PD_TYPE, /*105*/
+ MTK_PULL_PU_PD_TYPE, /*106*/
+ MTK_PULL_PU_PD_TYPE, /*107*/
+ MTK_PULL_PU_PD_TYPE, /*108*/
+ MTK_PULL_PU_PD_TYPE, /*109*/
+ MTK_PULL_PU_PD_TYPE, /*110*/
+ MTK_PULL_PU_PD_TYPE, /*111*/
+ MTK_PULL_PU_PD_TYPE, /*112*/
+ MTK_PULL_PU_PD_TYPE, /*113*/
+ MTK_PULL_PU_PD_TYPE, /*114*/
+ MTK_PULL_PU_PD_TYPE, /*115*/
+ MTK_PULL_PU_PD_TYPE, /*116*/
+ MTK_PULL_PU_PD_TYPE, /*117*/
+ MTK_PULL_PU_PD_TYPE, /*118*/
+ MTK_PULL_PU_PD_TYPE, /*119*/
+ MTK_PULL_PU_PD_TYPE, /*120*/
+ MTK_PULL_PU_PD_TYPE, /*121*/
+ MTK_PULL_PU_PD_TYPE, /*122*/
+ MTK_PULL_PU_PD_TYPE, /*123*/
+ MTK_PULL_PU_PD_TYPE, /*124*/
+ MTK_PULL_PU_PD_TYPE, /*125*/
+ MTK_PULL_PU_PD_TYPE, /*126*/
+ MTK_PULL_PU_PD_TYPE, /*127*/
+ MTK_PULL_PU_PD_TYPE, /*128*/
+ MTK_PULL_PU_PD_TYPE, /*129*/
+ MTK_PULL_PU_PD_TYPE, /*130*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*131*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*132*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*133*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*134*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*135*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*136*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*137*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*138*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*139*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*140*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*141*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*142*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*143*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*144*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*145*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*146*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*147*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*148*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*149*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*150*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*151*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*152*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*153*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*154*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*155*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*156*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*157*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*158*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*159*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*160*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*161*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*162*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*163*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*164*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*165*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*166*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*167*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*168*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*169*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*170*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*171*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*172*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*173*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*174*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*175*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*176*/
+};
+
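+/*
+ * Wires each PINCTRL_PIN_REG_* access type to its field table so the
+ * shared Paris pinctrl core can locate any pin's register bits.
+ */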
+static const struct mtk_pin_reg_calc mt8188_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8188_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8188_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8188_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8188_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8188_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8188_pin_ies_range),
+ [PINCTRL_PIN_REG_TDSEL] = MTK_RANGE(mt8188_pin_tdsel_range),
+ [PINCTRL_PIN_REG_RDSEL] = MTK_RANGE(mt8188_pin_rdsel_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8188_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8188_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8188_pin_r1_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8188_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8188_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8188_pin_drv_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8188_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8188_pin_rsel_range),
+};
+
+static const char * const mt8188_pinctrl_register_base_name[] = {
+ "iocfg0", "iocfg_rm", "iocfg_lt", "iocfg_lm", "iocfg_rt",
+};
+
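+/*
+ * EINT controller geometry; ap_num is assumed to be the total number of
+ * external-interrupt lines and db_cnt the number of debounce channels.
+ */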
+static const struct mtk_eint_hw mt8188_eint_hw = {
+ .port_mask = 0xf,
+ .ports = 7,
+ .ap_num = 225,
+ .db_cnt = 32,
+};
+
+static const struct mtk_pin_soc mt8188_data = {
+ .reg_cal = mt8188_reg_cals,
+ .pins = mtk_pins_mt8188,
+ .npins = ARRAY_SIZE(mtk_pins_mt8188),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8188),
+ .eint_hw = &mt8188_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt8188_pinctrl_register_base_name,
+ .nbase_names = ARRAY_SIZE(mt8188_pinctrl_register_base_name),
+ .pull_type = mt8188_pull_type,
+ .pin_rsel = mt8188_pin_rsel_val_range,
+ .npin_rsel = ARRAY_SIZE(mt8188_pin_rsel_val_range),
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+};
+
+static const struct of_device_id mt8188_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8188-pinctrl", .data = &mt8188_data },
+ { }
+};
+
+static struct platform_driver mt8188_pinctrl_driver = {
+ .driver = {
+ .name = "mt8188-pinctrl",
+ .of_match_table = mt8188_pinctrl_of_match,
+ .pm = &mtk_paris_pinctrl_pm_ops,
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
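+/*
+ * Registered at arch_initcall time so pin muxing is available before
+ * consumer drivers probe.
+ */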
+static int __init mt8188_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8188_pinctrl_driver);
+}
+
+arch_initcall(mt8188_pinctrl_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MediaTek MT8188 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h
new file mode 100644
index 000000000000..a487323748e2
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h
@@ -0,0 +1,2259 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT8188_H
+#define __PINCTRL_MTK_MT8188_H
+
+#include "pinctrl-paris.h"
+
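+/*
+ * Per-pin descriptors: pin number and name, EINT mapping, drive group,
+ * and up to eight mux functions whose MTK_FUNCTION() index is the mode
+ * value programmed via the mode field table (mode 0 = GPIO, matching
+ * .gpio_m in the SoC data). Function-name prefixes appear to encode the
+ * pad direction (B = bidirectional, I = input, O = output).
+ */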
+static const struct mtk_pin_desc mtk_pins_mt8188[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO0"),
+ MTK_FUNCTION(1, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "O_SPIM5_CSB"),
+ MTK_FUNCTION(3, "O_UTXD1"),
+ MTK_FUNCTION(4, "O_DMIC3_CLK"),
+ MTK_FUNCTION(5, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(6, "O_I2SO2_MCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A0")
+ ),
+
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO1"),
+ MTK_FUNCTION(1, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "O_SPIM5_CLK"),
+ MTK_FUNCTION(3, "I1_URXD1"),
+ MTK_FUNCTION(4, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(5, "B0_I2SIN_BCK"),
+ MTK_FUNCTION(6, "B0_I2SO2_BCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A1")
+ ),
+
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO2"),
+ MTK_FUNCTION(1, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "B0_SPIM5_MOSI"),
+ MTK_FUNCTION(3, "O_URTS1"),
+ MTK_FUNCTION(4, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(5, "B0_I2SIN_WS"),
+ MTK_FUNCTION(6, "B0_I2SO2_WS"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A2")
+ ),
+
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO3"),
+ MTK_FUNCTION(1, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "B0_SPIM5_MISO"),
+ MTK_FUNCTION(3, "I1_UCTS1"),
+ MTK_FUNCTION(4, "O_DMIC4_CLK"),
+ MTK_FUNCTION(5, "I0_I2SIN_D0"),
+ MTK_FUNCTION(6, "O_I2SO2_D0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A3")
+ ),
+
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO4"),
+ MTK_FUNCTION(1, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(3, "O_I2SO1_MCK"),
+ MTK_FUNCTION(4, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(5, "I0_I2SIN_D1"),
+ MTK_FUNCTION(6, "O_I2SO2_D1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A4")
+ ),
+
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO5"),
+ MTK_FUNCTION(1, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(3, "O_I2SO1_BCK"),
+ MTK_FUNCTION(4, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(5, "I0_I2SIN_D2"),
+ MTK_FUNCTION(6, "O_I2SO2_D2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A5")
+ ),
+
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO6"),
+ MTK_FUNCTION(1, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(3, "O_I2SO1_WS"),
+ MTK_FUNCTION(4, "O_DMIC1_CLK"),
+ MTK_FUNCTION(5, "I0_I2SIN_D3"),
+ MTK_FUNCTION(6, "O_I2SO2_D3"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO0")
+ ),
+
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO7"),
+ MTK_FUNCTION(1, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(2, "O_SPIM3_CSB"),
+ MTK_FUNCTION(3, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(4, "I0_DMIC1_DAT"),
+ MTK_FUNCTION(5, "O_CMVREF0"),
+ MTK_FUNCTION(6, "O_CLKM0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A6")
+ ),
+
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO8"),
+ MTK_FUNCTION(1, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "O_SPIM3_CLK"),
+ MTK_FUNCTION(3, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(4, "I0_DMIC1_DAT_R"),
+ MTK_FUNCTION(5, "O_CMVREF1"),
+ MTK_FUNCTION(6, "O_CLKM1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A7")
+ ),
+
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO9"),
+ MTK_FUNCTION(1, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "B0_SPIM3_MOSI"),
+ MTK_FUNCTION(3, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(4, "O_DMIC2_CLK"),
+ MTK_FUNCTION(5, "O_CMFLASH0"),
+ MTK_FUNCTION(6, "O_PWM_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A8")
+ ),
+
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO10"),
+ MTK_FUNCTION(1, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "B0_SPIM3_MISO"),
+ MTK_FUNCTION(3, "I0_TDMIN_DI"),
+ MTK_FUNCTION(4, "I0_DMIC2_DAT"),
+ MTK_FUNCTION(5, "O_CMFLASH1"),
+ MTK_FUNCTION(6, "O_PWM_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A9")
+ ),
+
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO11"),
+ MTK_FUNCTION(1, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "O_SPDIF_OUT"),
+ MTK_FUNCTION(3, "O_I2SO1_D0"),
+ MTK_FUNCTION(4, "I0_DMIC2_DAT_R"),
+ MTK_FUNCTION(5, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "O_CMVREF6"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A10")
+ ),
+
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO12"),
+ MTK_FUNCTION(1, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "O_SPIM4_CSB"),
+ MTK_FUNCTION(3, "B1_JTMS_SEL3"),
+ MTK_FUNCTION(4, "B1_APU_JTAG_TMS"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TMS"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TMS"),
+ MTK_FUNCTION(7, "I0_HDMITX20_HTPLG")
+ ),
+
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO13"),
+ MTK_FUNCTION(1, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "O_SPIM4_CLK"),
+ MTK_FUNCTION(3, "I0_JTCK_SEL3"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TCK"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TCK"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TCK"),
+ MTK_FUNCTION(7, "B1_HDMITX20_CEC")
+ ),
+
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO14"),
+ MTK_FUNCTION(1, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(2, "B0_SPIM4_MOSI"),
+ MTK_FUNCTION(3, "I1_JTDI_SEL3"),
+ MTK_FUNCTION(4, "I1_APU_JTAG_TDI"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TDI"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TDI"),
+ MTK_FUNCTION(7, "B1_HDMITX20_SCL")
+ ),
+
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO15"),
+ MTK_FUNCTION(1, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(2, "B0_SPIM4_MISO"),
+ MTK_FUNCTION(3, "O_JTDO_SEL3"),
+ MTK_FUNCTION(4, "O_APU_JTAG_TDO"),
+ MTK_FUNCTION(5, "O_VPU_UDI_TDO"),
+ MTK_FUNCTION(6, "O_IPU_JTAG_TDO"),
+ MTK_FUNCTION(7, "B1_HDMITX20_SDA")
+ ),
+
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO16"),
+ MTK_FUNCTION(1, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "O_UTXD3"),
+ MTK_FUNCTION(3, "I1_JTRSTn_SEL3"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TRST"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_NTRST"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TRST"),
+ MTK_FUNCTION(7, "O_HDMITX20_PWR5V")
+ ),
+
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO17"),
+ MTK_FUNCTION(1, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "I1_URXD3"),
+ MTK_FUNCTION(3, "O_CMFLASH2"),
+ MTK_FUNCTION(4, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(5, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "O_CMVREF7"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO1")
+ ),
+
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO18"),
+ MTK_FUNCTION(1, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH0"),
+ MTK_FUNCTION(3, "O_CMVREF4"),
+ MTK_FUNCTION(4, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(5, "O_UTXD1"),
+ MTK_FUNCTION(6, "O_TP_UTXD1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A11")
+ ),
+
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO19"),
+ MTK_FUNCTION(1, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH1"),
+ MTK_FUNCTION(3, "O_CMVREF5"),
+ MTK_FUNCTION(4, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(5, "I1_URXD1"),
+ MTK_FUNCTION(6, "I1_TP_URXD1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A12")
+ ),
+
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO20"),
+ MTK_FUNCTION(1, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH2"),
+ MTK_FUNCTION(3, "O_CLKM2"),
+ MTK_FUNCTION(4, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(5, "O_URTS1"),
+ MTK_FUNCTION(6, "O_TP_URTS1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A13")
+ ),
+
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO21"),
+ MTK_FUNCTION(1, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH3"),
+ MTK_FUNCTION(3, "O_CLKM3"),
+ MTK_FUNCTION(4, "I0_TDMIN_DI"),
+ MTK_FUNCTION(5, "I1_UCTS1"),
+ MTK_FUNCTION(6, "I1_TP_UCTS1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A14")
+ ),
+
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO22"),
+ MTK_FUNCTION(1, "O_CMMCLK0"),
+ MTK_FUNCTION(5, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A15")
+ ),
+
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO23"),
+ MTK_FUNCTION(1, "O_CMMCLK1"),
+ MTK_FUNCTION(3, "O_PWM_2"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL"),
+ MTK_FUNCTION(5, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A16")
+ ),
+
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO24"),
+ MTK_FUNCTION(1, "O_CMMCLK2"),
+ MTK_FUNCTION(3, "O_PWM_3"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA"),
+ MTK_FUNCTION(5, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO2")
+ ),
+
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO25"),
+ MTK_FUNCTION(1, "O_LCM_RST"),
+ MTK_FUNCTION(2, "O_LCM1_RST"),
+ MTK_FUNCTION(3, "I0_DP_TX_HPD")
+ ),
+
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO26"),
+ MTK_FUNCTION(1, "I0_DSI_TE"),
+ MTK_FUNCTION(2, "I0_DSI1_TE"),
+ MTK_FUNCTION(3, "I0_EDP_TX_HPD")
+ ),
+
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO27"),
+ MTK_FUNCTION(1, "O_LCM1_RST"),
+ MTK_FUNCTION(2, "O_LCM_RST"),
+ MTK_FUNCTION(3, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(4, "O_CMVREF2"),
+ MTK_FUNCTION(5, "O_mbistwriteen_trigger"),
+ MTK_FUNCTION(6, "O_PWM_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A17")
+ ),
+
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO28"),
+ MTK_FUNCTION(1, "I0_DSI1_TE"),
+ MTK_FUNCTION(2, "I0_DSI_TE"),
+ MTK_FUNCTION(3, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(4, "O_CMVREF3"),
+ MTK_FUNCTION(5, "O_mbistreaden_trigger"),
+ MTK_FUNCTION(6, "O_PWM_3"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A18")
+ ),
+
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO29"),
+ MTK_FUNCTION(1, "O_DISP_PWM0"),
+ MTK_FUNCTION(2, "O_DISP_PWM1")
+ ),
+
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO30"),
+ MTK_FUNCTION(1, "O_DISP_PWM1"),
+ MTK_FUNCTION(2, "O_DISP_PWM0"),
+ MTK_FUNCTION(3, "O_CMFLASH3"),
+ MTK_FUNCTION(4, "O_PWM_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A19")
+ ),
+
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO31"),
+ MTK_FUNCTION(1, "O_UTXD0"),
+ MTK_FUNCTION(2, "O_TP_UTXD1_AO"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(5, "O_MD32_0_TXD"),
+ MTK_FUNCTION(6, "O_MD32_1_TXD"),
+ MTK_FUNCTION(7, "O_SSPM_UTXD_AO")
+ ),
+
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO32"),
+ MTK_FUNCTION(1, "I1_URXD0"),
+ MTK_FUNCTION(2, "I1_TP_URXD1_AO"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(5, "I1_MD32_0_RXD"),
+ MTK_FUNCTION(6, "I1_MD32_1_RXD"),
+ MTK_FUNCTION(7, "I1_SSPM_URXD_AO")
+ ),
+
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO33"),
+ MTK_FUNCTION(1, "O_UTXD1"),
+ MTK_FUNCTION(2, "O_URTS2"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_TP_UTXD1_AO"),
+ MTK_FUNCTION(5, "O_mbistwriteen_trigger"),
+ MTK_FUNCTION(6, "O_MD32_0_TXD"),
+ MTK_FUNCTION(7, "O_SSPM_UTXD_AO")
+ ),
+
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO34"),
+ MTK_FUNCTION(1, "I1_URXD1"),
+ MTK_FUNCTION(2, "I1_UCTS2"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_TP_URXD1_AO"),
+ MTK_FUNCTION(5, "O_mbistreaden_trigger"),
+ MTK_FUNCTION(6, "I1_MD32_0_RXD"),
+ MTK_FUNCTION(7, "I1_SSPM_URXD_AO")
+ ),
+
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO35"),
+ MTK_FUNCTION(1, "O_UTXD2"),
+ MTK_FUNCTION(2, "O_URTS1"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_TP_URTS1_AO"),
+ MTK_FUNCTION(5, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(6, "O_MD32_1_TXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A20")
+ ),
+
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO36"),
+ MTK_FUNCTION(1, "I1_URXD2"),
+ MTK_FUNCTION(2, "I1_UCTS1"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_TP_UCTS1_AO"),
+ MTK_FUNCTION(5, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(6, "I1_MD32_1_RXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A21")
+ ),
+
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO37"),
+ MTK_FUNCTION(1, "B1_JTMS_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_TMS"),
+ MTK_FUNCTION(3, "I1_SPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TMS"),
+ MTK_FUNCTION(7, "I1_MCUPM_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO38"),
+ MTK_FUNCTION(1, "I0_JTCK_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_TCK"),
+ MTK_FUNCTION(3, "I1_SPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TCK"),
+ MTK_FUNCTION(7, "I1_MCUPM_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO39"),
+ MTK_FUNCTION(1, "I1_JTDI_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_TDI"),
+ MTK_FUNCTION(3, "I1_SPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TDI"),
+ MTK_FUNCTION(7, "I1_MCUPM_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO40"),
+ MTK_FUNCTION(1, "O_JTDO_SEL1"),
+ MTK_FUNCTION(2, "O_UDI_TDO"),
+ MTK_FUNCTION(3, "O_SPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "O_SCP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "O_CCU0_JTAG_TDO"),
+ MTK_FUNCTION(7, "O_MCUPM_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO41"),
+ MTK_FUNCTION(1, "I1_JTRSTn_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_NTRST"),
+ MTK_FUNCTION(3, "I0_SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "I0_SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TRST"),
+ MTK_FUNCTION(7, "I0_MCUPM_JTAG_TRSTN")
+ ),
+
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO42"),
+ MTK_FUNCTION(1, "B1_KPCOL0")
+ ),
+
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO43"),
+ MTK_FUNCTION(1, "B1_KPCOL1"),
+ MTK_FUNCTION(2, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(3, "O_CMFLASH2"),
+ MTK_FUNCTION(4, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(7, "O_mbistwriteen_trigger")
+ ),
+
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO44"),
+ MTK_FUNCTION(1, "B1_KPROW0")
+ ),
+
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO45"),
+ MTK_FUNCTION(1, "B1_KPROW1"),
+ MTK_FUNCTION(2, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(3, "O_CMFLASH3"),
+ MTK_FUNCTION(4, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(7, "O_mbistreaden_trigger")
+ ),
+
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO46"),
+ MTK_FUNCTION(1, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(2, "O_PWM_0"),
+ MTK_FUNCTION(3, "I0_VBUSVALID_2P"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A22")
+ ),
+
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO47"),
+ MTK_FUNCTION(1, "I1_WAKEN"),
+ MTK_FUNCTION(6, "O_GDU_TROOPS_DET0")
+ ),
+
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO48"),
+ MTK_FUNCTION(1, "O_PERSTN"),
+ MTK_FUNCTION(6, "O_GDU_TROOPS_DET1")
+ ),
+
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO49"),
+ MTK_FUNCTION(1, "B1_CLKREQN"),
+ MTK_FUNCTION(6, "O_GDU_TROOPS_DET2")
+ ),
+
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO50"),
+ MTK_FUNCTION(1, "O_HDMITX20_PWR5V"),
+ MTK_FUNCTION(3, "I1_IDDIG_1P"),
+ MTK_FUNCTION(4, "I1_SCP_JTAG1_TMS"),
+ MTK_FUNCTION(5, "I1_SSPM_JTAG_TMS"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO51"),
+ MTK_FUNCTION(1, "I0_HDMITX20_HTPLG"),
+ MTK_FUNCTION(2, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(3, "O_USB_DRVVBUS_1P"),
+ MTK_FUNCTION(4, "I1_SCP_JTAG1_TCK"),
+ MTK_FUNCTION(5, "I1_SSPM_JTAG_TCK"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO52"),
+ MTK_FUNCTION(1, "B1_HDMITX20_CEC"),
+ MTK_FUNCTION(3, "I0_VBUSVALID_1P"),
+ MTK_FUNCTION(4, "I1_SCP_JTAG1_TDI"),
+ MTK_FUNCTION(5, "I1_SSPM_JTAG_TDI"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO53"),
+ MTK_FUNCTION(1, "B1_HDMITX20_SCL"),
+ MTK_FUNCTION(3, "I1_IDDIG_2P"),
+ MTK_FUNCTION(4, "O_SCP_JTAG1_TDO"),
+ MTK_FUNCTION(5, "O_SSPM_JTAG_TDO"),
+ MTK_FUNCTION(6, "O_MD32_0_JTAG_TDO"),
+ MTK_FUNCTION(7, "O_MD32_1_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO54"),
+ MTK_FUNCTION(1, "B1_HDMITX20_SDA"),
+ MTK_FUNCTION(3, "O_USB_DRVVBUS_2P"),
+ MTK_FUNCTION(4, "I0_SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(5, "I0_SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TRST"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TRST")
+ ),
+
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO55"),
+ MTK_FUNCTION(1, "B1_SCL0"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO56"),
+ MTK_FUNCTION(1, "B1_SDA0"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO57"),
+ MTK_FUNCTION(1, "B1_SCL1")
+ ),
+
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO58"),
+ MTK_FUNCTION(1, "B1_SDA1")
+ ),
+
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO59"),
+ MTK_FUNCTION(1, "B1_SCL2"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1")
+ ),
+
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO60"),
+ MTK_FUNCTION(1, "B1_SDA2"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1")
+ ),
+
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO61"),
+ MTK_FUNCTION(1, "B1_SCL3"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO62"),
+ MTK_FUNCTION(1, "B1_SDA3"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO63"),
+ MTK_FUNCTION(1, "B1_SCL4")
+ ),
+
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO64"),
+ MTK_FUNCTION(1, "B1_SDA4")
+ ),
+
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO65"),
+ MTK_FUNCTION(1, "B1_SCL5"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1")
+ ),
+
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO66"),
+ MTK_FUNCTION(1, "B1_SDA5"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1")
+ ),
+
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO67"),
+ MTK_FUNCTION(1, "B1_SCL6"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO68"),
+ MTK_FUNCTION(1, "B1_SDA6"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO69"),
+ MTK_FUNCTION(1, "O_SPIM0_CSB"),
+ MTK_FUNCTION(2, "O_SCP_SPI0_CS"),
+ MTK_FUNCTION(3, "O_DMIC3_CLK"),
+ MTK_FUNCTION(4, "B0_MD32_1_GPIO0"),
+ MTK_FUNCTION(5, "O_CMVREF0"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP0_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A23")
+ ),
+
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO70"),
+ MTK_FUNCTION(1, "O_SPIM0_CLK"),
+ MTK_FUNCTION(2, "O_SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(4, "B0_MD32_1_GPIO1"),
+ MTK_FUNCTION(5, "O_CMVREF1"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP0_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A24")
+ ),
+
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO71"),
+ MTK_FUNCTION(1, "B0_SPIM0_MOSI"),
+ MTK_FUNCTION(2, "O_SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(4, "B0_MD32_1_GPIO2"),
+ MTK_FUNCTION(5, "O_CMVREF2"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP0_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A25")
+ ),
+
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO72"),
+ MTK_FUNCTION(1, "B0_SPIM0_MISO"),
+ MTK_FUNCTION(2, "I0_SCP_SPI0_MI"),
+ MTK_FUNCTION(3, "O_DMIC4_CLK"),
+ MTK_FUNCTION(5, "O_CMVREF3"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP1_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A26")
+ ),
+
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO73"),
+ MTK_FUNCTION(1, "B0_SPIM0_MIO2"),
+ MTK_FUNCTION(2, "O_UTXD3"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(4, "O_CLKM0"),
+ MTK_FUNCTION(5, "O_CMVREF4"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP1_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A27")
+ ),
+
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO74"),
+ MTK_FUNCTION(1, "B0_SPIM0_MIO3"),
+ MTK_FUNCTION(2, "I1_URXD3"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(4, "O_CLKM1"),
+ MTK_FUNCTION(5, "O_CMVREF5"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP1_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A28")
+ ),
+
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO75"),
+ MTK_FUNCTION(1, "O_SPIM1_CSB"),
+ MTK_FUNCTION(2, "O_SCP_SPI1_A_CS"),
+ MTK_FUNCTION(3, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(4, "B1_SCP_SCL0"),
+ MTK_FUNCTION(5, "O_CMVREF6"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP2_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A29")
+ ),
+
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO76"),
+ MTK_FUNCTION(1, "O_SPIM1_CLK"),
+ MTK_FUNCTION(2, "O_SCP_SPI1_A_CK"),
+ MTK_FUNCTION(3, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(4, "B1_SCP_SDA0"),
+ MTK_FUNCTION(5, "O_CMVREF7"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP2_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A30")
+ ),
+
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO77"),
+ MTK_FUNCTION(1, "B0_SPIM1_MOSI"),
+ MTK_FUNCTION(2, "O_SCP_SPI1_A_MO"),
+ MTK_FUNCTION(3, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(4, "B1_SCP_SCL1"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP2_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A31")
+ ),
+
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO78"),
+ MTK_FUNCTION(1, "B0_SPIM1_MISO"),
+ MTK_FUNCTION(2, "I0_SCP_SPI1_A_MI"),
+ MTK_FUNCTION(3, "I0_TDMIN_DI"),
+ MTK_FUNCTION(4, "B1_SCP_SDA1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A32")
+ ),
+
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO79"),
+ MTK_FUNCTION(1, "O_SPIM2_CSB"),
+ MTK_FUNCTION(2, "O_SCP_SPI2_CS"),
+ MTK_FUNCTION(3, "O_I2SO1_MCK"),
+ MTK_FUNCTION(4, "O_UTXD2"),
+ MTK_FUNCTION(5, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(6, "B0_PCM_SYNC"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B0")
+ ),
+
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO80"),
+ MTK_FUNCTION(1, "O_SPIM2_CLK"),
+ MTK_FUNCTION(2, "O_SCP_SPI2_CK"),
+ MTK_FUNCTION(3, "O_I2SO1_BCK"),
+ MTK_FUNCTION(4, "I1_URXD2"),
+ MTK_FUNCTION(5, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(6, "B0_PCM_CLK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B1")
+ ),
+
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO81"),
+ MTK_FUNCTION(1, "B0_SPIM2_MOSI"),
+ MTK_FUNCTION(2, "O_SCP_SPI2_MO"),
+ MTK_FUNCTION(3, "O_I2SO1_WS"),
+ MTK_FUNCTION(4, "O_URTS2"),
+ MTK_FUNCTION(5, "O_TP_URTS2_AO"),
+ MTK_FUNCTION(6, "O_PCM_DO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B2")
+ ),
+
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO82"),
+ MTK_FUNCTION(1, "B0_SPIM2_MISO"),
+ MTK_FUNCTION(2, "I0_SCP_SPI2_MI"),
+ MTK_FUNCTION(3, "O_I2SO1_D0"),
+ MTK_FUNCTION(4, "I1_UCTS2"),
+ MTK_FUNCTION(5, "I1_TP_UCTS2_AO"),
+ MTK_FUNCTION(6, "I0_PCM_DI"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B3")
+ ),
+
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO83"),
+ MTK_FUNCTION(1, "I1_IDDIG")
+ ),
+
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO84"),
+ MTK_FUNCTION(1, "O_USB_DRVVBUS")
+ ),
+
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO85"),
+ MTK_FUNCTION(1, "I0_VBUSVALID")
+ ),
+
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO86"),
+ MTK_FUNCTION(1, "I1_IDDIG_1P"),
+ MTK_FUNCTION(2, "O_UTXD1"),
+ MTK_FUNCTION(3, "O_URTS2"),
+ MTK_FUNCTION(4, "O_PWM_2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_ST0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B4")
+ ),
+
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO87"),
+ MTK_FUNCTION(1, "O_USB_DRVVBUS_1P"),
+ MTK_FUNCTION(2, "I1_URXD1"),
+ MTK_FUNCTION(3, "I1_UCTS2"),
+ MTK_FUNCTION(4, "O_PWM_3"),
+ MTK_FUNCTION(5, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_CLK0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B5")
+ ),
+
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO88"),
+ MTK_FUNCTION(1, "I0_VBUSVALID_1P"),
+ MTK_FUNCTION(2, "O_UTXD2"),
+ MTK_FUNCTION(3, "O_URTS1"),
+ MTK_FUNCTION(4, "O_CLKM2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_ST1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B6")
+ ),
+
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO89"),
+ MTK_FUNCTION(1, "I1_IDDIG_2P"),
+ MTK_FUNCTION(2, "I1_URXD2"),
+ MTK_FUNCTION(3, "I1_UCTS1"),
+ MTK_FUNCTION(4, "O_CLKM3"),
+ MTK_FUNCTION(5, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_CLK1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B7")
+ ),
+
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO90"),
+ MTK_FUNCTION(1, "O_USB_DRVVBUS_2P"),
+ MTK_FUNCTION(2, "O_UTXD3"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "O_MD32_0_TXD"),
+ MTK_FUNCTION(6, "O_MD32_1_TXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B8")
+ ),
+
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO91"),
+ MTK_FUNCTION(1, "I0_VBUSVALID_2P"),
+ MTK_FUNCTION(2, "I1_URXD3"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "I1_MD32_0_RXD"),
+ MTK_FUNCTION(6, "I1_MD32_1_RXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B9")
+ ),
+
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO92"),
+ MTK_FUNCTION(1, "O_PWRAP_SPI0_CSN")
+ ),
+
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO93"),
+ MTK_FUNCTION(1, "O_PWRAP_SPI0_CK")
+ ),
+
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO94"),
+ MTK_FUNCTION(1, "B0_PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "B0_PWRAP_SPI0_MI")
+ ),
+
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO95"),
+ MTK_FUNCTION(1, "B0_PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "B0_PWRAP_SPI0_MO")
+ ),
+
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO96"),
+ MTK_FUNCTION(1, "O_SRCLKENA0")
+ ),
+
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO97"),
+ MTK_FUNCTION(1, "O_SRCLKENA1")
+ ),
+
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO98"),
+ MTK_FUNCTION(1, "O_SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "I0_DVFSRC_EXT_REQ")
+ ),
+
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO99"),
+ MTK_FUNCTION(1, "I0_RTC32K_CK")
+ ),
+
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO100"),
+ MTK_FUNCTION(1, "O_WATCHDOG")
+ ),
+
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO101"),
+ MTK_FUNCTION(1, "O_AUD_CLK_MOSI"),
+ MTK_FUNCTION(2, "O_I2SO1_MCK"),
+ MTK_FUNCTION(3, "B0_I2SIN_BCK")
+ ),
+
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO102"),
+ MTK_FUNCTION(1, "O_AUD_SYNC_MOSI"),
+ MTK_FUNCTION(2, "O_I2SO1_BCK"),
+ MTK_FUNCTION(3, "B0_I2SIN_WS")
+ ),
+
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO103"),
+ MTK_FUNCTION(1, "O_AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "O_I2SO1_WS"),
+ MTK_FUNCTION(3, "I0_I2SIN_D0")
+ ),
+
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO104"),
+ MTK_FUNCTION(1, "O_AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "O_I2SO1_D0"),
+ MTK_FUNCTION(3, "I0_I2SIN_D1")
+ ),
+
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO105"),
+ MTK_FUNCTION(1, "I0_AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "I0_VOW_DAT_MISO"),
+ MTK_FUNCTION(3, "I0_I2SIN_D2")
+ ),
+
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO106"),
+ MTK_FUNCTION(1, "I0_AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "I0_VOW_CLK_MISO"),
+ MTK_FUNCTION(3, "I0_I2SIN_D3")
+ ),
+
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO107"),
+ MTK_FUNCTION(1, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(2, "I0_SPLIN_MCK"),
+ MTK_FUNCTION(3, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(4, "O_CMVREF4"),
+ MTK_FUNCTION(5, "O_AUXIF_ST0"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR0")
+ ),
+
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO108"),
+ MTK_FUNCTION(1, "B0_I2SIN_BCK"),
+ MTK_FUNCTION(2, "I0_SPLIN_LRCK"),
+ MTK_FUNCTION(3, "O_DMIC4_CLK"),
+ MTK_FUNCTION(4, "O_CMVREF5"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK0"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B10")
+ ),
+
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO109"),
+ MTK_FUNCTION(1, "B0_I2SIN_WS"),
+ MTK_FUNCTION(2, "I0_SPLIN_BCK"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(4, "O_CMVREF6"),
+ MTK_FUNCTION(5, "O_AUXIF_ST1"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B11")
+ ),
+
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO110"),
+ MTK_FUNCTION(1, "I0_I2SIN_D0"),
+ MTK_FUNCTION(2, "I0_SPLIN_D0"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(4, "O_CMVREF7"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK1"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR3"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B12")
+ ),
+
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO111"),
+ MTK_FUNCTION(1, "I0_I2SIN_D1"),
+ MTK_FUNCTION(2, "I0_SPLIN_D1"),
+ MTK_FUNCTION(3, "O_DMIC3_CLK"),
+ MTK_FUNCTION(4, "O_SPDIF_OUT"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR4"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B13")
+ ),
+
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO112"),
+ MTK_FUNCTION(1, "I0_I2SIN_D2"),
+ MTK_FUNCTION(2, "I0_SPLIN_D2"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(4, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(5, "O_I2SO1_WS"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR5"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B14")
+ ),
+
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO113"),
+ MTK_FUNCTION(1, "I0_I2SIN_D3"),
+ MTK_FUNCTION(2, "I0_SPLIN_D3"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(4, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(5, "O_I2SO1_D0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B15")
+ ),
+
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO114"),
+ MTK_FUNCTION(1, "O_I2SO2_MCK"),
+ MTK_FUNCTION(2, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(3, "I1_MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "B1_APU_JTAG_TMS"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG1_TMS"),
+ MTK_FUNCTION(6, "I1_SPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B16")
+ ),
+
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO115"),
+ MTK_FUNCTION(1, "B0_I2SO2_BCK"),
+ MTK_FUNCTION(2, "B0_I2SIN_BCK"),
+ MTK_FUNCTION(3, "I1_MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TCK"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG1_TCK"),
+ MTK_FUNCTION(6, "I1_SPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B17")
+ ),
+
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO116"),
+ MTK_FUNCTION(1, "B0_I2SO2_WS"),
+ MTK_FUNCTION(2, "B0_I2SIN_WS"),
+ MTK_FUNCTION(3, "I1_MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "I1_APU_JTAG_TDI"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG1_TDI"),
+ MTK_FUNCTION(6, "I1_SPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B18")
+ ),
+
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO117"),
+ MTK_FUNCTION(1, "O_I2SO2_D0"),
+ MTK_FUNCTION(2, "I0_I2SIN_D0"),
+ MTK_FUNCTION(3, "O_MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_APU_JTAG_TDO"),
+ MTK_FUNCTION(5, "O_SCP_JTAG1_TDO"),
+ MTK_FUNCTION(6, "O_SPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B19")
+ ),
+
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO118"),
+ MTK_FUNCTION(1, "O_I2SO2_D1"),
+ MTK_FUNCTION(2, "I0_I2SIN_D1"),
+ MTK_FUNCTION(3, "I0_MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TRST"),
+ MTK_FUNCTION(5, "I0_SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(6, "I0_SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B20")
+ ),
+
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO119"),
+ MTK_FUNCTION(1, "O_I2SO2_D2"),
+ MTK_FUNCTION(2, "I0_I2SIN_D2"),
+ MTK_FUNCTION(3, "O_UTXD3"),
+ MTK_FUNCTION(4, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(5, "O_I2SO1_MCK"),
+ MTK_FUNCTION(6, "O_SSPM_UTXD_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B21")
+ ),
+
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO120"),
+ MTK_FUNCTION(1, "O_I2SO2_D3"),
+ MTK_FUNCTION(2, "I0_I2SIN_D3"),
+ MTK_FUNCTION(3, "I1_URXD3"),
+ MTK_FUNCTION(4, "I0_TDMIN_DI"),
+ MTK_FUNCTION(5, "O_I2SO1_BCK"),
+ MTK_FUNCTION(6, "I1_SSPM_URXD_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B22")
+ ),
+
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO121"),
+ MTK_FUNCTION(1, "B0_PCM_CLK"),
+ MTK_FUNCTION(2, "O_SPIM4_CSB"),
+ MTK_FUNCTION(3, "O_SCP_SPI1_B_CS"),
+ MTK_FUNCTION(4, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_ST0"),
+ MTK_FUNCTION(6, "O_PGD_DA_EFUSE_RDY"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B23")
+ ),
+
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO122"),
+ MTK_FUNCTION(1, "B0_PCM_SYNC"),
+ MTK_FUNCTION(2, "O_SPIM4_CLK"),
+ MTK_FUNCTION(3, "O_SCP_SPI1_B_CK"),
+ MTK_FUNCTION(4, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK0"),
+ MTK_FUNCTION(6, "O_PGD_DA_EFUSE_RDY_PRE"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B24")
+ ),
+
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO123"),
+ MTK_FUNCTION(1, "O_PCM_DO"),
+ MTK_FUNCTION(2, "B0_SPIM4_MOSI"),
+ MTK_FUNCTION(3, "O_SCP_SPI1_B_MO"),
+ MTK_FUNCTION(4, "O_TP_URTS2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_ST1"),
+ MTK_FUNCTION(6, "O_PGD_DA_PWRGD_RESET"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B25")
+ ),
+
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO124"),
+ MTK_FUNCTION(1, "I0_PCM_DI"),
+ MTK_FUNCTION(2, "B0_SPIM4_MISO"),
+ MTK_FUNCTION(3, "I0_SCP_SPI1_B_MI"),
+ MTK_FUNCTION(4, "I1_TP_UCTS2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK1"),
+ MTK_FUNCTION(6, "O_PGD_DA_PWRGD_ENB"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B26")
+ ),
+
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO125"),
+ MTK_FUNCTION(1, "O_DMIC1_CLK"),
+ MTK_FUNCTION(2, "O_SPINOR_CK"),
+ MTK_FUNCTION(3, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(6, "O_LVTS_FOUT"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B27")
+ ),
+
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO126"),
+ MTK_FUNCTION(1, "I0_DMIC1_DAT"),
+ MTK_FUNCTION(2, "O_SPINOR_CS"),
+ MTK_FUNCTION(3, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(6, "O_LVTS_SDO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B28")
+ ),
+
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO127"),
+ MTK_FUNCTION(1, "I0_DMIC1_DAT_R"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO0"),
+ MTK_FUNCTION(3, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(6, "I0_LVTS_26M"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B29")
+ ),
+
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO128"),
+ MTK_FUNCTION(1, "O_DMIC2_CLK"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO1"),
+ MTK_FUNCTION(3, "I0_TDMIN_DI"),
+ MTK_FUNCTION(6, "I0_LVTS_SCF"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B30")
+ ),
+
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO129"),
+ MTK_FUNCTION(1, "I0_DMIC2_DAT"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO2"),
+ MTK_FUNCTION(3, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(6, "I0_LVTS_SCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B31")
+ ),
+
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO130"),
+ MTK_FUNCTION(1, "I0_DMIC2_DAT_R"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO3"),
+ MTK_FUNCTION(3, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(6, "I0_LVTS_SDI"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B32")
+ ),
+
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO131"),
+ MTK_FUNCTION(1, "O_DPI_D0"),
+ MTK_FUNCTION(2, "O_GBE_TXD3"),
+ MTK_FUNCTION(3, "O_DMIC1_CLK"),
+ MTK_FUNCTION(4, "O_I2SO2_MCK"),
+ MTK_FUNCTION(5, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(6, "O_SPIM5_CSB"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR0")
+ ),
+
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO132"),
+ MTK_FUNCTION(1, "O_DPI_D1"),
+ MTK_FUNCTION(2, "O_GBE_TXD2"),
+ MTK_FUNCTION(3, "I0_DMIC1_DAT"),
+ MTK_FUNCTION(4, "B0_I2SO2_BCK"),
+ MTK_FUNCTION(5, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(6, "O_SPIM5_CLK"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR1")
+ ),
+
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO133"),
+ MTK_FUNCTION(1, "O_DPI_D2"),
+ MTK_FUNCTION(2, "O_GBE_TXD1"),
+ MTK_FUNCTION(3, "I0_DMIC1_DAT_R"),
+ MTK_FUNCTION(4, "B0_I2SO2_WS"),
+ MTK_FUNCTION(5, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(6, "B0_SPIM5_MOSI"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR2")
+ ),
+
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO134"),
+ MTK_FUNCTION(1, "O_DPI_D3"),
+ MTK_FUNCTION(2, "O_GBE_TXD0"),
+ MTK_FUNCTION(3, "O_DMIC2_CLK"),
+ MTK_FUNCTION(4, "O_I2SO2_D0"),
+ MTK_FUNCTION(5, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(6, "B0_SPIM5_MISO"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR3")
+ ),
+
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO135"),
+ MTK_FUNCTION(1, "O_DPI_D4"),
+ MTK_FUNCTION(2, "I0_GBE_RXD3"),
+ MTK_FUNCTION(3, "I0_DMIC2_DAT"),
+ MTK_FUNCTION(4, "O_I2SO2_D1"),
+ MTK_FUNCTION(5, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(6, "I1_WAKEN"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR4")
+ ),
+
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO136"),
+ MTK_FUNCTION(1, "O_DPI_D5"),
+ MTK_FUNCTION(2, "I0_GBE_RXD2"),
+ MTK_FUNCTION(3, "I0_DMIC2_DAT_R"),
+ MTK_FUNCTION(4, "O_I2SO2_D2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(6, "O_PERSTN"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR5")
+ ),
+
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO137"),
+ MTK_FUNCTION(1, "O_DPI_D6"),
+ MTK_FUNCTION(2, "I0_GBE_RXD1"),
+ MTK_FUNCTION(3, "O_DMIC3_CLK"),
+ MTK_FUNCTION(4, "O_I2SO2_D3"),
+ MTK_FUNCTION(5, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "B1_CLKREQN"),
+ MTK_FUNCTION(7, "O_PWM_0")
+ ),
+
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO138"),
+ MTK_FUNCTION(1, "O_DPI_D7"),
+ MTK_FUNCTION(2, "I0_GBE_RXD0"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(4, "O_CLKM2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO0")
+ ),
+
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO139"),
+ MTK_FUNCTION(1, "O_DPI_D8"),
+ MTK_FUNCTION(2, "B0_GBE_TXC"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(4, "O_CLKM3"),
+ MTK_FUNCTION(5, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(6, "O_UTXD2"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO1")
+ ),
+
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO140"),
+ MTK_FUNCTION(1, "O_DPI_D9"),
+ MTK_FUNCTION(2, "I0_GBE_RXC"),
+ MTK_FUNCTION(3, "O_DMIC4_CLK"),
+ MTK_FUNCTION(4, "O_PWM_2"),
+ MTK_FUNCTION(5, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(6, "I1_URXD2"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO2")
+ ),
+
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO141"),
+ MTK_FUNCTION(1, "O_DPI_D10"),
+ MTK_FUNCTION(2, "I0_GBE_RXDV"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(4, "O_PWM_3"),
+ MTK_FUNCTION(5, "O_TP_URTS2_AO"),
+ MTK_FUNCTION(6, "O_URTS2"),
+ MTK_FUNCTION(7, "B0_MD32_1_GPIO0")
+ ),
+
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO142"),
+ MTK_FUNCTION(1, "O_DPI_D11"),
+ MTK_FUNCTION(2, "O_GBE_TXEN"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(4, "O_PWM_1"),
+ MTK_FUNCTION(5, "I1_TP_UCTS2_AO"),
+ MTK_FUNCTION(6, "I1_UCTS2"),
+ MTK_FUNCTION(7, "B0_MD32_1_GPIO1")
+ ),
+
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO143"),
+ MTK_FUNCTION(1, "O_DPI_D12"),
+ MTK_FUNCTION(2, "O_GBE_MDC"),
+ MTK_FUNCTION(3, "B0_MD32_0_GPIO0"),
+ MTK_FUNCTION(4, "O_CLKM0"),
+ MTK_FUNCTION(5, "O_SPIM3_CSB"),
+ MTK_FUNCTION(6, "O_UTXD1"),
+ MTK_FUNCTION(7, "B0_MD32_1_GPIO2")
+ ),
+
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO144"),
+ MTK_FUNCTION(1, "O_DPI_D13"),
+ MTK_FUNCTION(2, "B1_GBE_MDIO"),
+ MTK_FUNCTION(3, "B0_MD32_0_GPIO1"),
+ MTK_FUNCTION(4, "O_CLKM1"),
+ MTK_FUNCTION(5, "O_SPIM3_CLK"),
+ MTK_FUNCTION(6, "I1_URXD1"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR0")
+ ),
+
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO145"),
+ MTK_FUNCTION(1, "O_DPI_D14"),
+ MTK_FUNCTION(2, "O_GBE_TXER"),
+ MTK_FUNCTION(3, "B0_MD32_1_GPIO0"),
+ MTK_FUNCTION(4, "O_CMFLASH0"),
+ MTK_FUNCTION(5, "B0_SPIM3_MOSI"),
+ MTK_FUNCTION(6, "B0_GBE_AUX_PPS2"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR1")
+ ),
+
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO146"),
+ MTK_FUNCTION(1, "O_DPI_D15"),
+ MTK_FUNCTION(2, "I0_GBE_RXER"),
+ MTK_FUNCTION(3, "B0_MD32_1_GPIO1"),
+ MTK_FUNCTION(4, "O_CMFLASH1"),
+ MTK_FUNCTION(5, "B0_SPIM3_MISO"),
+ MTK_FUNCTION(6, "B0_GBE_AUX_PPS3"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR2")
+ ),
+
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO147"),
+ MTK_FUNCTION(1, "O_DPI_HSYNC"),
+ MTK_FUNCTION(2, "I0_GBE_COL"),
+ MTK_FUNCTION(3, "O_I2SO1_MCK"),
+ MTK_FUNCTION(4, "O_CMVREF0"),
+ MTK_FUNCTION(5, "O_SPDIF_OUT"),
+ MTK_FUNCTION(6, "O_URTS1"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR3")
+ ),
+
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO148"),
+ MTK_FUNCTION(1, "O_DPI_VSYNC"),
+ MTK_FUNCTION(2, "I0_GBE_INTR"),
+ MTK_FUNCTION(3, "O_I2SO1_BCK"),
+ MTK_FUNCTION(4, "O_CMVREF1"),
+ MTK_FUNCTION(5, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(6, "I1_UCTS1"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR4")
+ ),
+
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO149"),
+ MTK_FUNCTION(1, "O_DPI_DE"),
+ MTK_FUNCTION(2, "B0_GBE_AUX_PPS0"),
+ MTK_FUNCTION(3, "O_I2SO1_WS"),
+ MTK_FUNCTION(4, "O_CMVREF2"),
+ MTK_FUNCTION(5, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(6, "O_UTXD3"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR5")
+ ),
+
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO150"),
+ MTK_FUNCTION(1, "O_DPI_CK"),
+ MTK_FUNCTION(2, "B0_GBE_AUX_PPS1"),
+ MTK_FUNCTION(3, "O_I2SO1_D0"),
+ MTK_FUNCTION(4, "O_CMVREF3"),
+ MTK_FUNCTION(5, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(6, "I1_URXD3")
+ ),
+
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO151"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT7")
+ ),
+
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO152"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT6")
+ ),
+
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO153"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT5")
+ ),
+
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO154"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT4")
+ ),
+
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO155"),
+ MTK_FUNCTION(1, "O_MSDC0_RSTB")
+ ),
+
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO156"),
+ MTK_FUNCTION(1, "B1_MSDC0_CMD")
+ ),
+
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO157"),
+ MTK_FUNCTION(1, "B1_MSDC0_CLK")
+ ),
+
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO158"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT3")
+ ),
+
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO159"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT2")
+ ),
+
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO160"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT1")
+ ),
+
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO161"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT0")
+ ),
+
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO162"),
+ MTK_FUNCTION(1, "B0_MSDC0_DSL")
+ ),
+
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO163"),
+ MTK_FUNCTION(1, "B1_MSDC1_CMD"),
+ MTK_FUNCTION(2, "O_SPDIF_OUT"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TMS"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO164"),
+ MTK_FUNCTION(1, "B1_MSDC1_CLK"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TCK"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO165"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT0"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TDI"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO166"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT1"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(3, "O_MD32_0_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "O_SCP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "O_CCU0_JTAG_TDO"),
+ MTK_FUNCTION(7, "O_IPU_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO167"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT2"),
+ MTK_FUNCTION(2, "O_PWM_0"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TRST"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "I0_SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TRST"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TRST")
+ ),
+
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO168"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT3"),
+ MTK_FUNCTION(2, "O_PWM_1"),
+ MTK_FUNCTION(3, "O_CLKM0")
+ ),
+
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO169"),
+ MTK_FUNCTION(1, "B1_MSDC2_CMD"),
+ MTK_FUNCTION(2, "O_LVTS_FOUT"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TMS"),
+ MTK_FUNCTION(4, "I0_UDI_TMS"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TMS"),
+ MTK_FUNCTION(6, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(7, "I1_SSPM_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO170"),
+ MTK_FUNCTION(1, "B1_MSDC2_CLK"),
+ MTK_FUNCTION(2, "O_LVTS_SDO"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_UDI_TCK"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TCK"),
+ MTK_FUNCTION(6, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(7, "I1_SSPM_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO171"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT0"),
+ MTK_FUNCTION(2, "I0_LVTS_26M"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TDI"),
+ MTK_FUNCTION(4, "I0_UDI_TDI"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TDI"),
+ MTK_FUNCTION(6, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(7, "I1_SSPM_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO172"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT1"),
+ MTK_FUNCTION(2, "I0_LVTS_SCF"),
+ MTK_FUNCTION(3, "O_MD32_1_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_UDI_TDO"),
+ MTK_FUNCTION(5, "O_VPU_UDI_TDO"),
+ MTK_FUNCTION(6, "I0_TDMIN_DI"),
+ MTK_FUNCTION(7, "O_SSPM_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO173"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT2"),
+ MTK_FUNCTION(2, "I0_LVTS_SCK"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TRST"),
+ MTK_FUNCTION(4, "I0_UDI_NTRST"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_NTRST"),
+ MTK_FUNCTION(7, "I0_SSPM_JTAG_TRSTN")
+ ),
+
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO174"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT3"),
+ MTK_FUNCTION(2, "I0_LVTS_SDI")
+ ),
+
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO175"),
+ MTK_FUNCTION(1, "B0_SPMI_M_SCL")
+ ),
+
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 176),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO176"),
+ MTK_FUNCTION(1, "B0_SPMI_M_SDA")
+ ),
+
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 212),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 213),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 214),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 215),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 221),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 224),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ )
+};
+
+#endif /* __PINCTRL__MTK_MT8188_H */
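Each MTK_PIN() entry in the table above is, in effect, a per-pad table mapping a mux mode number to the signal it routes; when a function is requested, the driver resolves the name back to the mode value it must program. A minimal stand-alone model of that lookup follows — the struct layout and helper are illustrative stand-ins, not the driver's real mtk_pin_desc API:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for one pad's mode table; the real driver builds
 * equivalent data from the MTK_PIN()/MTK_FUNCTION() macros above. */
struct pin_mode {
	int mode;		/* value programmed into the mux field */
	const char *func;	/* signal routed to the pad in that mode */
};

struct pin_desc {
	int number;
	const char *name;
	struct pin_mode modes[8];	/* modes 0..7; unused entries stay NULL */
};

static const struct pin_desc gpio100 = {
	.number = 100,
	.name = "GPIO100",
	.modes = {
		{ 0, "B_GPIO100" },	/* mode 0: plain GPIO */
		{ 1, "O_WATCHDOG" },	/* mode 1: watchdog output */
	},
};

/* Resolve a requested function name to its mux mode, or -1 if the pad
 * cannot carry that signal. */
static int pin_mode_for_func(const struct pin_desc *p, const char *func)
{
	for (int i = 0; i < 8; i++)
		if (p->modes[i].func && !strcmp(p->modes[i].func, func))
			return p->modes[i].mode;
	return -1;
}

int main(void)
{
	printf("O_WATCHDOG on %s -> mode %d\n", gpio100.name,
	       pin_mode_for_func(&gpio100, "O_WATCHDOG"));
	return 0;
}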
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index cc2cd73ff8f9..530f3f934e19 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -608,6 +608,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.label = pc->data->name;
pc->chip.parent = pc->dev;
+ pc->chip.fwnode = pc->fwnode;
pc->chip.request = gpiochip_generic_request;
pc->chip.free = gpiochip_generic_free;
pc->chip.set_config = gpiochip_generic_config;
@@ -619,8 +620,6 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
- pc->chip.of_node = pc->of_node;
- pc->chip.of_gpio_n_cells = 2;
ret = gpiochip_add_data(&pc->chip, pc);
if (ret) {
@@ -678,8 +677,8 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc)
return -EINVAL;
}
- gpio_np = to_of_node(gpiochip_node_get_first(pc->dev));
- pc->of_node = gpio_np;
+ pc->fwnode = gpiochip_node_get_first(pc->dev);
+ gpio_np = to_of_node(pc->fwnode);
pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
if (IS_ERR_OR_NULL(pc->reg_mux)) {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index b197827027bd..34fc4e8612e4 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -12,6 +12,8 @@
#include <linux/types.h>
#include <linux/module.h>
+struct fwnode_handle;
+
struct meson_pinctrl;
/**
@@ -131,7 +133,7 @@ struct meson_pinctrl {
struct regmap *reg_gpio;
struct regmap *reg_ds;
struct gpio_chip chip;
- struct device_node *of_node;
+ struct fwnode_handle *fwnode;
};
#define FUNCTION(fn) \
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index bcde042d29dc..261b46841b9f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -112,14 +112,14 @@ struct armada_37xx_pinctrl {
struct armada_37xx_pm_state pm;
};
-#define PIN_GRP(_name, _start, _nr, _mask, _func1, _func2) \
+#define PIN_GRP_GPIO_0(_name, _start, _nr) \
{ \
.name = _name, \
.start_pin = _start, \
.npins = _nr, \
- .reg_mask = _mask, \
- .val = {0, _mask}, \
- .funcs = {_func1, _func2} \
+ .reg_mask = 0, \
+ .val = {0}, \
+ .funcs = {"gpio"} \
}
#define PIN_GRP_GPIO(_name, _start, _nr, _mask, _func1) \
@@ -179,6 +179,7 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
"pwm", "led"),
PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
+ PIN_GRP_GPIO_0("gpio1_5", 5, 1),
PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
PIN_GRP_GPIO("i2c1", 0, 2, BIT(10), "i2c"),
PIN_GRP_GPIO("spi_cs1", 17, 1, BIT(12), "spi"),
@@ -195,15 +196,18 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
+ PIN_GRP_GPIO_0("gpio2_2", 2, 1),
PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"),
PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"), /* this actually controls "pcie1_reset" */
PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"),
PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"),
- PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"),
- PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
- PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
+ PIN_GRP_GPIO("ptp", 20, 1, BIT(11), "ptp"),
+ PIN_GRP_GPIO_3("ptp_clk", 21, 1, BIT(6) | BIT(12), 0, BIT(6), BIT(12),
+ "ptp", "mii"),
+ PIN_GRP_GPIO_3("ptp_trig", 22, 1, BIT(7) | BIT(13), 0, BIT(7), BIT(13),
+ "ptp", "mii"),
PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
"mii", "mii_err"),
};
@@ -486,11 +490,15 @@ static int armada_37xx_gpio_request_enable(struct pinctrl_dev *pctldev,
struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct armada_37xx_pin_group *group;
int grp = 0;
+ int ret;
dev_dbg(info->dev, "requesting gpio %d\n", offset);
- while ((group = armada_37xx_find_next_grp_by_pin(info, offset, &grp)))
- armada_37xx_pmx_set_by_name(pctldev, "gpio", group);
+ while ((group = armada_37xx_find_next_grp_by_pin(info, offset, &grp))) {
+ ret = armada_37xx_pmx_set_by_name(pctldev, "gpio", group);
+ if (ret)
+ return ret;
+ }
return 0;
}
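The last armada-37xx hunk stops discarding the return value of armada_37xx_pmx_set_by_name(): the first muxing failure now aborts the GPIO request instead of reporting success on a half-configured pin. The shape of the fix is the standard early-return pattern, modeled stand-alone below (both functions are hypothetical stand-ins for the driver's group iterator and mux call):

#include <stdio.h>

/* Hypothetical stand-in for the mux call; pretend one group fails. */
static int set_muxing(int group)
{
	return group == 2 ? -1 : 0;
}

/* Before the fix the loop discarded errors; propagating them means the
 * first failure aborts the request rather than leaving the pin in a
 * partially muxed state while the caller sees success. */
static int request_gpio(int ngroups)
{
	for (int group = 0; group < ngroups; group++) {
		int ret = set_muxing(group);
		if (ret)
			return ret;	/* propagate instead of ignoring */
	}
	return 0;
}

int main(void)
{
	printf("request_gpio(4) = %d\n", request_gpio(4));
	return 0;
}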
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index ac3d4d91266d..758d21f0a850 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -674,163 +674,160 @@ static const unsigned hwobs_oc4_1_pins[] = { DB8500_PIN_D17, DB8500_PIN_D16,
DB8500_PIN_D21, DB8500_PIN_D20, DB8500_PIN_C20, DB8500_PIN_B21,
DB8500_PIN_C21, DB8500_PIN_A22, DB8500_PIN_B24, DB8500_PIN_C22 };
-#define DB8500_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
- .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
-
static const struct nmk_pingroup nmk_db8500_groups[] = {
/* Altfunction A column */
- DB8500_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(u1rxtx_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(u1ctsrts_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipi2c_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipi2c_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp0txrx_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcdb_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcdvsi0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcdvsi1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d0_d7_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d12_d15_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc2_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipgpio0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipgpio1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(modem_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(kp_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp2sck_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp2_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc1_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc1dir_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout1_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout2_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout2_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(usb_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u1rxtx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u1ctsrts_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipi2c_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipi2c_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp0txrx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcdb_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcdvsi0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcdvsi1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d0_d7_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d12_d15_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc2_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipgpio0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipgpio1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(modem_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(kp_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp2sck_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp2_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc1_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc1dir_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout1_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout2_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout2_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(usb_a_1, NMK_GPIO_ALT_A),
/* Altfunction B column */
- DB8500_PIN_GROUP(trig_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c4_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c1_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c2_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c2_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(msp0txrx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c1_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(u2rxtx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(uartmodtx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(msp0sck_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(uartmodrx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(stmmod_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(uartmodrx_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ipgpio7_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ipgpio2_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ipgpio3_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcdaclk_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcda_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcd_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcd_d16_d23_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ddrtrig_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(spi1_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(mc3_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_3, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_4, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(trig_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c4_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c2_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c2_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(msp0txrx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c1_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(u2rxtx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(uartmodtx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(msp0sck_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(uartmodrx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(stmmod_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(uartmodrx_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ipgpio7_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ipgpio2_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ipgpio3_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcdaclk_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcda_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcd_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcd_d16_d23_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ddrtrig_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(spi1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(mc3_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_3, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_4, NMK_GPIO_ALT_B),
/* Altfunction C column */
- DB8500_PIN_GROUP(ipjtag_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio6_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio1_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio3_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio2_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(slim0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ms_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(iptrigout_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2rxtx_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2ctsrts_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio4_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio5_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio6_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio7_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(smcleale_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(stmape_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2rxtx_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio2_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio3_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio4_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio5_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(mc5_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(mc2rstn_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(kp_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(smps0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(smps1_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2rxtx_c_3, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(stmape_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(uartmodrx_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(uartmodtx_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(stmmod_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(usbsim_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(mc4rstn_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(clkout1_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(clkout2_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(i2c3_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(usbsim_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(i2c3_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipjtag_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio6_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio1_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio3_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio2_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(slim0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ms_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(iptrigout_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2rxtx_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2ctsrts_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio4_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio5_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio6_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio7_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(smcleale_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(stmape_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2rxtx_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio2_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio3_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio4_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio5_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(mc5_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(mc2rstn_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(kp_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(smps0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(smps1_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2rxtx_c_3, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(stmape_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(uartmodrx_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(uartmodtx_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(stmmod_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(usbsim_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(mc4rstn_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(clkout1_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(clkout2_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(i2c3_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(usbsim_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(i2c3_c_2, NMK_GPIO_ALT_C),
/* Other alt C1 column */
- DB8500_PIN_GROUP(u2rx_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(stmape_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(remap0_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(remap1_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(ptma9_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(rf_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(hxclk_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(uartmodrx_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(uartmodtx_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(stmmod_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(hxgpio_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(rf_oc1_2, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(spi2_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(spi2_oc1_2, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(u2rx_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(stmape_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(remap0_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(remap1_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(ptma9_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(rf_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(hxclk_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(uartmodrx_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(uartmodtx_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(stmmod_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(hxgpio_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(rf_oc1_2, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(spi2_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(spi2_oc1_2, NMK_GPIO_ALT_C1),
/* Other alt C2 column */
- DB8500_PIN_GROUP(sbag_oc2_1, NMK_GPIO_ALT_C2),
- DB8500_PIN_GROUP(etmr4_oc2_1, NMK_GPIO_ALT_C2),
- DB8500_PIN_GROUP(ptma9_oc2_1, NMK_GPIO_ALT_C2),
+ NMK_PIN_GROUP(sbag_oc2_1, NMK_GPIO_ALT_C2),
+ NMK_PIN_GROUP(etmr4_oc2_1, NMK_GPIO_ALT_C2),
+ NMK_PIN_GROUP(ptma9_oc2_1, NMK_GPIO_ALT_C2),
/* Other alt C3 column */
- DB8500_PIN_GROUP(stmmod_oc3_1, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(stmmod_oc3_2, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(uartmodrx_oc3_1, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(uartmodtx_oc3_1, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(etmr4_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(stmmod_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(stmmod_oc3_2, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(uartmodrx_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(uartmodtx_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(etmr4_oc3_1, NMK_GPIO_ALT_C3),
/* Other alt C4 column */
- DB8500_PIN_GROUP(sbag_oc4_1, NMK_GPIO_ALT_C4),
- DB8500_PIN_GROUP(hwobs_oc4_1, NMK_GPIO_ALT_C4),
+ NMK_PIN_GROUP(sbag_oc4_1, NMK_GPIO_ALT_C4),
+ NMK_PIN_GROUP(hwobs_oc4_1, NMK_GPIO_ALT_C4),
};
/* We use this macro to define the groups applicable to a function */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 8d944bb3a036..c0d7c86d0939 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -303,23 +303,20 @@ static const unsigned usbhs_c_1_pins[] = { STN8815_PIN_E21, STN8815_PIN_E20,
STN8815_PIN_C16, STN8815_PIN_A15,
STN8815_PIN_D17, STN8815_PIN_C17 };
-#define STN8815_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
- .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
-
static const struct nmk_pingroup nmk_stn8815_groups[] = {
- STN8815_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(u1_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(i2cusb_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(clcd_16_23_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(usbfs_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(usbhs_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2cusb_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(clcd_16_23_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(usbfs_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(usbhs_c_1, NMK_GPIO_ALT_C),
};
/* We use this macro to define the groups applicable to a function */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index f5014d09d81a..f7d02513d8cc 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -244,7 +244,6 @@ enum nmk_gpio_slpm {
struct nmk_gpio_chip {
struct gpio_chip chip;
- struct irq_chip irqchip;
void __iomem *addr;
struct clk *clk;
unsigned int bank;
@@ -608,8 +607,8 @@ static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
static void nmk_gpio_irq_ack(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
clk_enable(nmk_chip->clk);
writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
@@ -675,15 +674,11 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
__nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
}
-static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
+static void nmk_gpio_irq_maskunmask(struct nmk_gpio_chip *nmk_chip,
+ struct irq_data *d, bool enable)
{
- struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- nmk_chip = irq_data_get_irq_chip_data(d);
- if (!nmk_chip)
- return -EINVAL;
-
clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
@@ -696,29 +691,32 @@ static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
spin_unlock(&nmk_chip->lock);
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
clk_disable(nmk_chip->clk);
-
- return 0;
}
static void nmk_gpio_irq_mask(struct irq_data *d)
{
- nmk_gpio_irq_maskunmask(d, false);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ nmk_gpio_irq_maskunmask(nmk_chip, d, false);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void nmk_gpio_irq_unmask(struct irq_data *d)
{
- nmk_gpio_irq_maskunmask(d, true);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+ nmk_gpio_irq_maskunmask(nmk_chip, d, true);
}
static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
- struct nmk_gpio_chip *nmk_chip;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
unsigned long flags;
- nmk_chip = irq_data_get_irq_chip_data(d);
- if (!nmk_chip)
- return -EINVAL;
-
clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
@@ -740,14 +738,12 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
bool enabled = !irqd_irq_disabled(d);
bool wake = irqd_is_wakeup_set(d);
- struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- nmk_chip = irq_data_get_irq_chip_data(d);
- if (!nmk_chip)
- return -EINVAL;
if (type & IRQ_TYPE_LEVEL_HIGH)
return -EINVAL;
if (type & IRQ_TYPE_LEVEL_LOW)
@@ -784,7 +780,8 @@ static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
{
- struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
clk_enable(nmk_chip->clk);
nmk_gpio_irq_unmask(d);
@@ -793,7 +790,8 @@ static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
static void nmk_gpio_irq_shutdown(struct irq_data *d)
{
- struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
nmk_gpio_irq_mask(d);
clk_disable(nmk_chip->clk);
@@ -1078,13 +1076,34 @@ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
return nmk_chip;
}
+static void nmk_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ seq_printf(p, "nmk%u-%u-%u", nmk_chip->bank,
+ gc->base, gc->base + gc->ngpio - 1);
+}
+
+static const struct irq_chip nmk_irq_chip = {
+ .irq_ack = nmk_gpio_irq_ack,
+ .irq_mask = nmk_gpio_irq_mask,
+ .irq_unmask = nmk_gpio_irq_unmask,
+ .irq_set_type = nmk_gpio_irq_set_type,
+ .irq_set_wake = nmk_gpio_irq_set_wake,
+ .irq_startup = nmk_gpio_irq_startup,
+ .irq_shutdown = nmk_gpio_irq_shutdown,
+ .irq_print_chip = nmk_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int nmk_gpio_probe(struct platform_device *dev)
{
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
- struct irq_chip *irqchip;
bool supports_sleepmode;
int irq;
int ret;
@@ -1125,22 +1144,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip->can_sleep = false;
chip->owner = THIS_MODULE;
- irqchip = &nmk_chip->irqchip;
- irqchip->irq_ack = nmk_gpio_irq_ack;
- irqchip->irq_mask = nmk_gpio_irq_mask;
- irqchip->irq_unmask = nmk_gpio_irq_unmask;
- irqchip->irq_set_type = nmk_gpio_irq_set_type;
- irqchip->irq_set_wake = nmk_gpio_irq_set_wake;
- irqchip->irq_startup = nmk_gpio_irq_startup;
- irqchip->irq_shutdown = nmk_gpio_irq_shutdown;
- irqchip->flags = IRQCHIP_MASK_ON_SUSPEND;
- irqchip->name = kasprintf(GFP_KERNEL, "nmk%u-%u-%u",
- dev->id,
- chip->base,
- chip->base + chip->ngpio - 1);
-
girq = &chip->irq;
- girq->chip = irqchip;
+ gpio_irq_chip_set_chip(girq, &nmk_irq_chip);
girq->parent_handler = nmk_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&dev->dev, 1,
@@ -1179,17 +1184,17 @@ static const char *nmk_get_group_name(struct pinctrl_dev *pctldev,
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
- return npct->soc->groups[selector].name;
+ return npct->soc->groups[selector].grp.name;
}
static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
- *pins = npct->soc->groups[selector].pins;
- *num_pins = npct->soc->groups[selector].npins;
+ *pins = npct->soc->groups[selector].grp.pins;
+ *npins = npct->soc->groups[selector].grp.npins;
return 0;
}
@@ -1531,7 +1536,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
if (g->altsetting < 0)
return -EINVAL;
- dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
+ dev_dbg(npct->dev, "enable group %s, %u pins\n", g->grp.name, g->grp.npins);
/*
* If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
@@ -1566,26 +1571,26 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
* Then mask the pins that need to be sleeping now when we're
* switching to the ALT C function.
*/
- for (i = 0; i < g->npins; i++)
- slpm[g->pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->pins[i]);
+ for (i = 0; i < g->grp.npins; i++)
+ slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
nmk_gpio_glitch_slpm_init(slpm);
}
- for (i = 0; i < g->npins; i++) {
+ for (i = 0; i < g->grp.npins; i++) {
struct nmk_gpio_chip *nmk_chip;
unsigned bit;
- nmk_chip = find_nmk_gpio_from_pin(g->pins[i]);
+ nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i]);
if (!nmk_chip) {
dev_err(npct->dev,
"invalid pin offset %d in group %s at index %d\n",
- g->pins[i], g->name, i);
+ g->grp.pins[i], g->grp.name, i);
goto out_glitch;
}
- dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting);
+ dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->grp.pins[i], g->altsetting);
clk_enable(nmk_chip->clk);
- bit = g->pins[i] % NMK_GPIO_PER_CHIP;
+ bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
/*
* If the pin is switching to altfunc, and there was an
* interrupt installed on it which has been lazy disabled,
@@ -1608,7 +1613,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
* then some bits in PRCM GPIOCR registers must be cleared.
*/
if ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C)
- nmk_prcm_altcx_set_mode(npct, g->pins[i],
+ nmk_prcm_altcx_set_mode(npct, g->grp.pins[i],
g->altsetting >> NMK_GPIO_ALT_CX_SHIFT);
}
@@ -1802,10 +1807,6 @@ static const struct of_device_id nmk_pinctrl_match[] = {
.compatible = "stericsson,db8500-pinctrl",
.data = (void *)PINCTRL_NMK_DB8500,
},
- {
- .compatible = "stericsson,db8540-pinctrl",
- .data = (void *)PINCTRL_NMK_DB8540,
- },
{},
};
@@ -1856,8 +1857,6 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
nmk_pinctrl_stn8815_init(&npct->soc);
if (version == PINCTRL_NMK_DB8500)
nmk_pinctrl_db8500_init(&npct->soc);
- if (version == PINCTRL_NMK_DB8540)
- nmk_pinctrl_db8540_init(&npct->soc);
/*
* Since we depend on the GPIO chips to provide clock and register base
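The pinctrl-nomadik change above is an instance of the generic immutable-irqchip conversion: the irq_chip becomes a shared const structure flagged IRQCHIP_IMMUTABLE, the gpio_irq_chip is pointed at it via gpio_irq_chip_set_chip(), and the per-instance kasprintf()'d name is replaced by an irq_print_chip() callback. A minimal sketch of the shape, with hypothetical foo_* callbacks standing in for the driver's own:

static void foo_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	foo_hw_mask(gc, hwirq);			/* hypothetical */
	gpiochip_disable_irq(gc, hwirq);	/* release the GPIO */
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	gpiochip_enable_irq(gc, hwirq);		/* claim the GPIO */
	foo_hw_unmask(gc, hwirq);		/* hypothetical */
}

static const struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* in probe, instead of girq->chip = &priv->irqchip: */
gpio_irq_chip_set_chip(&chip->irq, &foo_irq_chip);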
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index ae0bac06639f..84e297757335 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
@@ -5,7 +5,6 @@
/* Package definitions */
#define PINCTRL_NMK_STN8815 0
#define PINCTRL_NMK_DB8500 1
-#define PINCTRL_NMK_DB8540 2
/* Alternate functions: function C is set in hw by setting both A and B */
#define NMK_GPIO_ALT_GPIO 0
@@ -105,21 +104,21 @@ struct nmk_function {
/**
* struct nmk_pingroup - describes a Nomadik pin group
- * @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- * from the driver-local pin enumeration space
- * @num_pins: the number of pins in this group array, i.e. the number of
- * elements in .pins so we can iterate over that array
+ * @grp: Generic data of the pin group (name and pins)
* @altsetting: the altsetting to apply to all pins in this group to
* configure them to be used by a function
*/
struct nmk_pingroup {
- const char *name;
- const unsigned int *pins;
- const unsigned npins;
+ struct pingroup grp;
int altsetting;
};
+#define NMK_PIN_GROUP(a, b) \
+ { \
+ .grp = PINCTRL_PINGROUP(#a, a##_pins, ARRAY_SIZE(a##_pins)), \
+ .altsetting = b, \
+ }
+
/**
* struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
* @pins: An array describing all pins the pin controller affects.
@@ -173,17 +172,4 @@ nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
#endif
-#ifdef CONFIG_PINCTRL_DB8540
-
-void nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc);
-
-#else
-
-static inline void
-nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
-{
-}
-
-#endif
-
#endif /* PINCTRL_PINCTRL_NOMADIK_H */
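struct pingroup and the PINCTRL_PINGROUP() initializer used by NMK_PIN_GROUP() are the generic pin-group helpers from <linux/pinctrl/pinctrl.h>, which is why every access in the .c file above gained a .grp. hop. A sketch of how a group table now reads, with a made-up pin array:

static const unsigned int u0_a_1_pins[] = { 0, 1, 2, 3 };

static const struct nmk_pingroup nmk_example_groups[] = {
	/*
	 * Expands to:
	 *   .grp = { .name = "u0_a_1", .pins = u0_a_1_pins,
	 *            .npins = ARRAY_SIZE(u0_a_1_pins) },
	 *   .altsetting = NMK_GPIO_ALT_A,
	 */
	NMK_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
};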
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 64d8a568b3db..1c4e89b046de 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -81,11 +81,11 @@ struct npcm7xx_gpio {
int irq;
struct irq_chip irq_chip;
u32 pinctrl_id;
- int (*direction_input)(struct gpio_chip *chip, unsigned offset);
- int (*direction_output)(struct gpio_chip *chip, unsigned offset,
+ int (*direction_input)(struct gpio_chip *chip, unsigned int offset);
+ int (*direction_output)(struct gpio_chip *chip, unsigned int offset,
int value);
- int (*request)(struct gpio_chip *chip, unsigned offset);
- void (*free)(struct gpio_chip *chip, unsigned offset);
+ int (*request)(struct gpio_chip *chip, unsigned int offset);
+ void (*free)(struct gpio_chip *chip, unsigned int offset);
};
struct npcm7xx_pinctrl {
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 0dbeb91f0bf2..8193b92da403 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -1081,10 +1081,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
girq->num_parents = 0;
for (i = 0; i < WPCM450_NUM_GPIO_IRQS; i++) {
- int irq = fwnode_irq_get(child, i);
+ int irq;
+ irq = fwnode_irq_get(child, i);
if (irq < 0)
break;
+ if (!irq)
+ continue;
girq->parents[i] = irq;
girq->num_parents++;
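fwnode_irq_get() has three result classes, and the wpcm450 hunk above now handles all of them: a negative value is a real error (including -EPROBE_DEFER), zero means no IRQ mapping exists at that index, and a positive value is a usable Linux IRQ number. The same convention drives the microchip-sgpio, ingenic and pistachio hunks later in this series. The generic calling shape, where nr_irqs and store_irq() are placeholders:

for (i = 0; i < nr_irqs; i++) {
	int irq = fwnode_irq_get(child, i);

	if (irq < 0)
		return irq;	/* real error, incl. -EPROBE_DEFER */
	if (irq == 0)
		continue;	/* nothing mapped at this index */

	store_irq(i, irq);	/* hypothetical: e.g. girq->parents[i] */
}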
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 4691a33bc374..6be896871718 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -246,7 +246,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
}
seq_printf(s, "GPIO bank%d\n", bank);
for (; i < pin_num; i++) {
- seq_printf(s, "📌%d\t", i);
+ seq_printf(s, "#%d\t", i);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -278,32 +278,32 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
}
if (pin_reg & BIT(INTERRUPT_MASK_OFF))
- interrupt_mask = "-";
+ interrupt_mask = "😛";
else
- interrupt_mask = "+";
- seq_printf(s, "int %s (🎭 %s)| active-%s| %s-🔫| ",
+ interrupt_mask = "😷";
+ seq_printf(s, "int %s (%s)| active-%s| %s-⚡| ",
interrupt_enable,
interrupt_mask,
active_level,
level_trig);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
- wake_cntrl0 = "+";
+ wake_cntrl0 = "⏰";
else
- wake_cntrl0 = "∅";
- seq_printf(s, "S0i3 🌅 %s| ", wake_cntrl0);
+ wake_cntrl0 = " ∅";
+ seq_printf(s, "S0i3 %s| ", wake_cntrl0);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
- wake_cntrl1 = "+";
+ wake_cntrl1 = "⏰";
else
- wake_cntrl1 = "∅";
- seq_printf(s, "S3 🌅 %s| ", wake_cntrl1);
+ wake_cntrl1 = " ∅";
+ seq_printf(s, "S3 %s| ", wake_cntrl1);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
- wake_cntrl2 = "+";
+ wake_cntrl2 = "⏰";
else
- wake_cntrl2 = "∅";
- seq_printf(s, "S4/S5 🌅 %s| ", wake_cntrl2);
+ wake_cntrl2 = " ∅";
+ seq_printf(s, "S4/S5 %s| ", wake_cntrl2);
if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
pull_up_enable = "+";
@@ -367,7 +367,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
debounce_enable = " ∅";
}
snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
- seq_printf(s, "debounce %s (⏰ %sus)| ", debounce_enable, debounce_value);
+ seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
seq_printf(s, " 0x%x\n", pin_reg);
}
}
@@ -639,7 +639,7 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
if (!(regval & PIN_IRQ_PENDING) ||
!(regval & BIT(INTERRUPT_MASK_OFF)))
continue;
- generic_handle_domain_irq(gc->irq.domain, irqnr + i);
+ generic_handle_domain_irq_safe(gc->irq.domain, irqnr + i);
/* Clear interrupt.
* We must read the pin register again, in case the
@@ -1051,13 +1051,13 @@ static void amd_get_iomux_res(struct amd_gpio *gpio_dev)
index = device_property_match_string(dev, "pinctrl-resource-names", "iomux");
if (index < 0) {
- dev_warn(dev, "failed to get iomux index\n");
+ dev_dbg(dev, "iomux not supported\n");
goto out_no_pinmux;
}
gpio_dev->iomux_base = devm_platform_ioremap_resource(gpio_dev->pdev, index);
if (IS_ERR(gpio_dev->iomux_base)) {
- dev_warn(dev, "Failed to get iomux %d io resource\n", index);
+ dev_dbg(dev, "iomux not supported %d io resource\n", index);
goto out_no_pinmux;
}
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 5634fa063ebf..81dbffab621f 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -22,8 +22,7 @@
#include <linux/pinctrl/pinmux.h>
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
-
-#include <soc/at91/pm.h>
+#include <linux/pm.h>
#include "pinctrl-at91.h"
#include "core.h"
@@ -33,16 +32,34 @@
struct at91_pinctrl_mux_ops;
+/**
+ * struct at91_gpio_chip: at91 gpio chip
+ * @chip: gpio chip
+ * @range: gpio range
+ * @next: bank sharing same clock
+ * @pioc_hwirq: PIO bank interrupt identifier on AIC
+ * @pioc_virq: PIO bank Linux virtual interrupt
+ * @pioc_idx: PIO bank index
+ * @regbase: PIO bank virtual address
+ * @clock: associated clock
+ * @ops: at91 pinctrl mux ops
+ * @wakeups: wakeup interrupts
+ * @backups: interrupts disabled in suspend
+ * @id: gpio chip identifier
+ */
struct at91_gpio_chip {
struct gpio_chip chip;
struct pinctrl_gpio_range range;
- struct at91_gpio_chip *next; /* Bank sharing same clock */
- int pioc_hwirq; /* PIO bank interrupt identifier on AIC */
- int pioc_virq; /* PIO bank Linux virtual interrupt */
- int pioc_idx; /* PIO bank index */
- void __iomem *regbase; /* PIO bank virtual address */
- struct clk *clock; /* associated clock */
- const struct at91_pinctrl_mux_ops *ops; /* ops */
+ struct at91_gpio_chip *next;
+ int pioc_hwirq;
+ int pioc_virq;
+ int pioc_idx;
+ void __iomem *regbase;
+ struct clk *clock;
+ const struct at91_pinctrl_mux_ops *ops;
+ u32 wakeups;
+ u32 backups;
+ u32 id;
};
static struct at91_gpio_chip *gpio_chips[MAX_GPIO_BANKS];
@@ -1615,70 +1632,51 @@ static void gpio_irq_ack(struct irq_data *d)
/* the interrupt is already cleared before by reading ISR */
}
-static u32 wakeups[MAX_GPIO_BANKS];
-static u32 backups[MAX_GPIO_BANKS];
-
static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
{
struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
- unsigned bank = at91_gpio->pioc_idx;
unsigned mask = 1 << d->hwirq;
- if (unlikely(bank >= MAX_GPIO_BANKS))
- return -EINVAL;
-
if (state)
- wakeups[bank] |= mask;
+ at91_gpio->wakeups |= mask;
else
- wakeups[bank] &= ~mask;
+ at91_gpio->wakeups &= ~mask;
irq_set_irq_wake(at91_gpio->pioc_virq, state);
return 0;
}
-void at91_pinctrl_gpio_suspend(void)
+static int at91_gpio_suspend(struct device *dev)
{
- int i;
-
- for (i = 0; i < gpio_banks; i++) {
- void __iomem *pio;
+ struct at91_gpio_chip *at91_chip = dev_get_drvdata(dev);
+ void __iomem *pio = at91_chip->regbase;
- if (!gpio_chips[i])
- continue;
-
- pio = gpio_chips[i]->regbase;
+ at91_chip->backups = readl_relaxed(pio + PIO_IMR);
+ writel_relaxed(at91_chip->backups, pio + PIO_IDR);
+ writel_relaxed(at91_chip->wakeups, pio + PIO_IER);
- backups[i] = readl_relaxed(pio + PIO_IMR);
- writel_relaxed(backups[i], pio + PIO_IDR);
- writel_relaxed(wakeups[i], pio + PIO_IER);
+ if (!at91_chip->wakeups)
+ clk_disable_unprepare(at91_chip->clock);
+ else
+ dev_dbg(dev, "GPIO-%c may wake for %08x\n",
+ 'A' + at91_chip->id, at91_chip->wakeups);
- if (!wakeups[i])
- clk_disable_unprepare(gpio_chips[i]->clock);
- else
- printk(KERN_DEBUG "GPIO-%c may wake for %08x\n",
- 'A'+i, wakeups[i]);
- }
+ return 0;
}
-void at91_pinctrl_gpio_resume(void)
+static int at91_gpio_resume(struct device *dev)
{
- int i;
-
- for (i = 0; i < gpio_banks; i++) {
- void __iomem *pio;
+ struct at91_gpio_chip *at91_chip = dev_get_drvdata(dev);
+ void __iomem *pio = at91_chip->regbase;
- if (!gpio_chips[i])
- continue;
-
- pio = gpio_chips[i]->regbase;
+ if (!at91_chip->wakeups)
+ clk_prepare_enable(at91_chip->clock);
- if (!wakeups[i])
- clk_prepare_enable(gpio_chips[i]->clock);
+ writel_relaxed(at91_chip->wakeups, pio + PIO_IDR);
+ writel_relaxed(at91_chip->backups, pio + PIO_IER);
- writel_relaxed(wakeups[i], pio + PIO_IDR);
- writel_relaxed(backups[i], pio + PIO_IER);
- }
+ return 0;
}
static void gpio_irq_handler(struct irq_desc *desc)
@@ -1860,6 +1858,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
}
at91_chip->chip = at91_gpio_template;
+ at91_chip->id = alias_idx;
chip = &at91_chip->chip;
chip->label = dev_name(&pdev->dev);
@@ -1905,6 +1904,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
goto gpiochip_add_err;
gpio_chips[alias_idx] = at91_chip;
+ platform_set_drvdata(pdev, at91_chip);
gpio_banks = max(gpio_banks, alias_idx + 1);
dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
@@ -1920,10 +1920,15 @@ err:
return ret;
}
+static const struct dev_pm_ops at91_gpio_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(at91_gpio_suspend, at91_gpio_resume)
+};
+
static struct platform_driver at91_gpio_driver = {
.driver = {
.name = "gpio-at91",
.of_match_table = at91_gpio_of_match,
+ .pm = pm_ptr(&at91_gpio_pm_ops),
},
.probe = at91_gpio_probe,
};
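With the suspend/resume state moved into struct at91_gpio_chip, the exported at91_pinctrl_gpio_suspend()/resume() entry points (and their <soc/at91/pm.h> header) are no longer needed: the platform driver registers noirq sleep ops instead, and pm_ptr() lets the ops compile away when CONFIG_PM is off. A minimal sketch of the pattern under assumed foo_* names:

static int foo_suspend_noirq(struct device *dev)
{
	struct foo_chip *chip = dev_get_drvdata(dev);

	chip->backup = foo_read_irq_mask(chip);	/* hypothetical */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	struct foo_chip *chip = dev_get_drvdata(dev);

	foo_write_irq_mask(chip, chip->backup);	/* hypothetical */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= pm_ptr(&foo_pm_ops),	/* NULL without CONFIG_PM */
	},
};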
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
new file mode 100644
index 000000000000..68509a2301b8
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -0,0 +1,1419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CY8C95X0 20/40/60 pin I2C GPIO port expander with interrupt support
+ *
+ * Copyright (C) 2022 9elements GmbH
+ * Authors: Patrick Rudolph <patrick.rudolph@9elements.com>
+ * Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/dmi.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+
+/* Fast access registers */
+#define CY8C95X0_INPUT 0x00
+#define CY8C95X0_OUTPUT 0x08
+#define CY8C95X0_INTSTATUS 0x10
+
+#define CY8C95X0_INPUT_(x) (CY8C95X0_INPUT + (x))
+#define CY8C95X0_OUTPUT_(x) (CY8C95X0_OUTPUT + (x))
+#define CY8C95X0_INTSTATUS_(x) (CY8C95X0_INTSTATUS + (x))
+
+/* Port Select configures the port */
+#define CY8C95X0_PORTSEL 0x18
+/* Port settings, write PORTSEL first */
+#define CY8C95X0_INTMASK 0x19
+#define CY8C95X0_PWMSEL 0x1A
+#define CY8C95X0_INVERT 0x1B
+#define CY8C95X0_DIRECTION 0x1C
+/* Drive mode registers change state on writing '1' */
+#define CY8C95X0_DRV_PU 0x1D
+#define CY8C95X0_DRV_PD 0x1E
+#define CY8C95X0_DRV_ODH 0x1F
+#define CY8C95X0_DRV_ODL 0x20
+#define CY8C95X0_DRV_PP_FAST 0x21
+#define CY8C95X0_DRV_PP_SLOW 0x22
+#define CY8C95X0_DRV_HIZ 0x23
+#define CY8C95X0_DEVID 0x2E
+#define CY8C95X0_WATCHDOG 0x2F
+#define CY8C95X0_COMMAND 0x30
+
+#define CY8C95X0_PIN_TO_OFFSET(x) (((x) >= 20) ? ((x) + 4) : (x))
+
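Gport2 on these expanders is only 4 bits wide, so there is a 4-bit hole in the register bit space after pin 19; CY8C95X0_PIN_TO_OFFSET() jumps over it so that pin 20 lands on the first bit of the next full port. A few worked values (BANK_SZ, defined further down, is 8):

/*
 * pin 19 -> offset 19 -> port 2, bit 3	(last Gport2 line)
 * pin 20 -> offset 24 -> port 3, bit 0	(bits 20..23 skipped)
 * pin 59 -> offset 63 -> port 7, bit 7	(last line of a cy8c9560)
 */
unsigned int off  = CY8C95X0_PIN_TO_OFFSET(20);	/* 24 */
unsigned int port = off / 8;			/* 3 */
unsigned int mask = BIT(off % 8);		/* BIT(0) */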
+static const struct i2c_device_id cy8c95x0_id[] = {
+ { "cy8c9520", 20, },
+ { "cy8c9540", 40, },
+ { "cy8c9560", 60, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cy8c95x0_id);
+
+#define OF_CY8C95X(__nrgpio) ((void *)(__nrgpio))
+
+static const struct of_device_id cy8c95x0_dt_ids[] = {
+ { .compatible = "cypress,cy8c9520", .data = OF_CY8C95X(20), },
+ { .compatible = "cypress,cy8c9540", .data = OF_CY8C95X(40), },
+ { .compatible = "cypress,cy8c9560", .data = OF_CY8C95X(60), },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cy8c95x0_dt_ids);
+
+static const struct acpi_gpio_params cy8c95x0_irq_gpios = { 0, 0, true };
+
+static const struct acpi_gpio_mapping cy8c95x0_acpi_irq_gpios[] = {
+ { "irq-gpios", &cy8c95x0_irq_gpios, 1, ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER },
+ { }
+};
+
+static int cy8c95x0_acpi_get_irq(struct device *dev)
+{
+ int ret;
+
+ ret = devm_acpi_dev_add_driver_gpios(dev, cy8c95x0_acpi_irq_gpios);
+ if (ret)
+ dev_warn(dev, "can't add GPIO ACPI mapping\n");
+
+ ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "ACPI interrupt quirk (IRQ %d)\n", ret);
+ return ret;
+}
+
+static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
+ {
+ /*
+		 * On the Intel Galileo Gen 1 board the IRQ pin is provided
+		 * as an absolute number instead of being relative.
+		 * Since the first controller (gpio-sch.c) and the second
+		 * (gpio-dwapb.c) are at fixed bases, we can safely use the
+		 * number in the global GPIO space to look up the IRQ.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ },
+ {}
+};
+
+#define MAX_BANK 8
+#define BANK_SZ 8
+#define MAX_LINE (MAX_BANK * BANK_SZ)
+
+#define CY8C95X0_GPIO_MASK GENMASK(7, 0)
+
+/**
+ * struct cy8c95x0_pinctrl - driver data
+ * @regmap: Device's regmap
+ * @irq_lock: IRQ bus lock
+ * @i2c_lock: Mutex for the device internal mux register
+ * @irq_mask: I/O bits affected by interrupts
+ * @irq_trig_raise: I/O bits affected by raising voltage level
+ * @irq_trig_fall: I/O bits affected by falling voltage level
+ * @irq_trig_low: I/O bits affected by a low voltage level
+ * @irq_trig_high: I/O bits affected by a high voltage level
+ * @push_pull: I/O bits configured as push pull driver
+ * @shiftmask: Mask used to compensate for Gport2 width
+ * @nport: Number of Gports in this chip
+ * @gpio_chip: gpiolib chip
+ * @driver_data: private driver data
+ * @regulator: Pointer to the regulator for the IC
+ * @dev: struct device
+ * @pctldev: pin controller device
+ * @pinctrl_desc: pin controller description
+ * @name: Chip controller name
+ * @tpin: Total number of pins
+ */
+struct cy8c95x0_pinctrl {
+ struct regmap *regmap;
+ struct mutex irq_lock;
+ struct mutex i2c_lock;
+ DECLARE_BITMAP(irq_mask, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_low, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_high, MAX_LINE);
+ DECLARE_BITMAP(push_pull, MAX_LINE);
+ DECLARE_BITMAP(shiftmask, MAX_LINE);
+ int nport;
+ struct gpio_chip gpio_chip;
+ unsigned long driver_data;
+ struct regulator *regulator;
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pinctrl_desc;
+ char name[32];
+ unsigned int tpin;
+};
+
+static const struct pinctrl_pin_desc cy8c9560_pins[] = {
+ PINCTRL_PIN(0, "gp00"),
+ PINCTRL_PIN(1, "gp01"),
+ PINCTRL_PIN(2, "gp02"),
+ PINCTRL_PIN(3, "gp03"),
+ PINCTRL_PIN(4, "gp04"),
+ PINCTRL_PIN(5, "gp05"),
+ PINCTRL_PIN(6, "gp06"),
+ PINCTRL_PIN(7, "gp07"),
+
+ PINCTRL_PIN(8, "gp10"),
+ PINCTRL_PIN(9, "gp11"),
+ PINCTRL_PIN(10, "gp12"),
+ PINCTRL_PIN(11, "gp13"),
+ PINCTRL_PIN(12, "gp14"),
+ PINCTRL_PIN(13, "gp15"),
+ PINCTRL_PIN(14, "gp16"),
+ PINCTRL_PIN(15, "gp17"),
+
+ PINCTRL_PIN(16, "gp20"),
+ PINCTRL_PIN(17, "gp21"),
+ PINCTRL_PIN(18, "gp22"),
+ PINCTRL_PIN(19, "gp23"),
+
+ PINCTRL_PIN(20, "gp30"),
+ PINCTRL_PIN(21, "gp31"),
+ PINCTRL_PIN(22, "gp32"),
+ PINCTRL_PIN(23, "gp33"),
+ PINCTRL_PIN(24, "gp34"),
+ PINCTRL_PIN(25, "gp35"),
+ PINCTRL_PIN(26, "gp36"),
+ PINCTRL_PIN(27, "gp37"),
+
+ PINCTRL_PIN(28, "gp40"),
+ PINCTRL_PIN(29, "gp41"),
+ PINCTRL_PIN(30, "gp42"),
+ PINCTRL_PIN(31, "gp43"),
+ PINCTRL_PIN(32, "gp44"),
+ PINCTRL_PIN(33, "gp45"),
+ PINCTRL_PIN(34, "gp46"),
+ PINCTRL_PIN(35, "gp47"),
+
+ PINCTRL_PIN(36, "gp50"),
+ PINCTRL_PIN(37, "gp51"),
+ PINCTRL_PIN(38, "gp52"),
+ PINCTRL_PIN(39, "gp53"),
+ PINCTRL_PIN(40, "gp54"),
+ PINCTRL_PIN(41, "gp55"),
+ PINCTRL_PIN(42, "gp56"),
+ PINCTRL_PIN(43, "gp57"),
+
+ PINCTRL_PIN(44, "gp60"),
+ PINCTRL_PIN(45, "gp61"),
+ PINCTRL_PIN(46, "gp62"),
+ PINCTRL_PIN(47, "gp63"),
+ PINCTRL_PIN(48, "gp64"),
+ PINCTRL_PIN(49, "gp65"),
+ PINCTRL_PIN(50, "gp66"),
+ PINCTRL_PIN(51, "gp67"),
+
+ PINCTRL_PIN(52, "gp70"),
+ PINCTRL_PIN(53, "gp71"),
+ PINCTRL_PIN(54, "gp72"),
+ PINCTRL_PIN(55, "gp73"),
+ PINCTRL_PIN(56, "gp74"),
+ PINCTRL_PIN(57, "gp75"),
+ PINCTRL_PIN(58, "gp76"),
+ PINCTRL_PIN(59, "gp77"),
+};
+
+static const char * const cy8c95x0_groups[] = {
+ "gp00",
+ "gp01",
+ "gp02",
+ "gp03",
+ "gp04",
+ "gp05",
+ "gp06",
+ "gp07",
+
+ "gp10",
+ "gp11",
+ "gp12",
+ "gp13",
+ "gp14",
+ "gp15",
+ "gp16",
+ "gp17",
+
+ "gp20",
+ "gp21",
+ "gp22",
+ "gp23",
+
+ "gp30",
+ "gp31",
+ "gp32",
+ "gp33",
+ "gp34",
+ "gp35",
+ "gp36",
+ "gp37",
+
+ "gp40",
+ "gp41",
+ "gp42",
+ "gp43",
+ "gp44",
+ "gp45",
+ "gp46",
+ "gp47",
+
+ "gp50",
+ "gp51",
+ "gp52",
+ "gp53",
+ "gp54",
+ "gp55",
+ "gp56",
+ "gp57",
+
+ "gp60",
+ "gp61",
+ "gp62",
+ "gp63",
+ "gp64",
+ "gp65",
+ "gp66",
+ "gp67",
+
+ "gp70",
+ "gp71",
+ "gp72",
+ "gp73",
+ "gp74",
+ "gp75",
+ "gp76",
+ "gp77",
+};
+
+static inline u8 cypress_get_port(struct cy8c95x0_pinctrl *chip, unsigned int pin)
+{
+ /* Account for GPORT2 which only has 4 bits */
+ return CY8C95X0_PIN_TO_OFFSET(pin) / BANK_SZ;
+}
+
+static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
+{
+ /* Account for GPORT2 which only has 4 bits */
+ return BIT(CY8C95X0_PIN_TO_OFFSET(pin) % BANK_SZ);
+}
+
+static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x24 ... 0x27:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+ return false;
+ case CY8C95X0_DEVID:
+ return false;
+ case 0x24 ... 0x27:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+ case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+ case CY8C95X0_INTMASK:
+ case CY8C95X0_INVERT:
+ case CY8C95X0_PWMSEL:
+ case CY8C95X0_DIRECTION:
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cy8c95x0_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default cy8c95x0_reg_defaults[] = {
+ { CY8C95X0_OUTPUT_(0), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(1), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(2), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(3), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(4), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(5), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(6), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(7), GENMASK(7, 0) },
+ { CY8C95X0_PORTSEL, 0 },
+ { CY8C95X0_PWMSEL, 0 },
+};
+
+static const struct regmap_config cy8c95x0_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .reg_defaults = cy8c95x0_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(cy8c95x0_reg_defaults),
+
+ .readable_reg = cy8c95x0_readable_register,
+ .writeable_reg = cy8c95x0_writeable_register,
+ .volatile_reg = cy8c95x0_volatile_register,
+ .precious_reg = cy8c95x0_precious_register,
+
+ .cache_type = REGCACHE_FLAT,
+ .max_register = CY8C95X0_COMMAND,
+};
+
+static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
+ unsigned long *val, unsigned long *mask)
+{
+ DECLARE_BITMAP(tmask, MAX_LINE);
+ DECLARE_BITMAP(tval, MAX_LINE);
+ int write_val;
+ int ret = 0;
+ int i, off = 0;
+ u8 bits;
+
+ /* Add the 4 bit gap of Gport2 */
+ bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tmask, tmask, 4, MAX_LINE);
+ bitmap_replace(tmask, tmask, mask, chip->shiftmask, BANK_SZ * 3);
+
+ bitmap_andnot(tval, val, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tval, tval, 4, MAX_LINE);
+ bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
+
+ mutex_lock(&chip->i2c_lock);
+ for (i = 0; i < chip->nport; i++) {
+ /* Skip over unused banks */
+ bits = bitmap_get_value8(tmask, i * BANK_SZ);
+ if (!bits)
+ continue;
+
+ switch (reg) {
+ /* Muxed registers */
+ case CY8C95X0_INTMASK:
+ case CY8C95X0_PWMSEL:
+ case CY8C95X0_INVERT:
+ case CY8C95X0_DIRECTION:
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
+ if (ret < 0)
+ goto out;
+ off = reg;
+ break;
+ /* Direct access registers */
+ case CY8C95X0_INPUT:
+ case CY8C95X0_OUTPUT:
+ case CY8C95X0_INTSTATUS:
+ off = reg + i;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ write_val = bitmap_get_value8(tval, i * BANK_SZ);
+
+ ret = regmap_update_bits(chip->regmap, off, bits, write_val);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ if (ret < 0)
+ dev_err(chip->dev, "failed writing register %d: err %d\n", off, ret);
+
+ return ret;
+}
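Everything from CY8C95X0_INTMASK through CY8C95X0_DRV_HIZ is a single register window muxed by CY8C95X0_PORTSEL, which is why the helper above selects the port before each update and why both writes must sit under i2c_lock. Stripped of the bitmap plumbing, the per-port access pattern is roughly:

mutex_lock(&chip->i2c_lock);

/* point the muxed window at one Gport... */
ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
if (!ret)
	/* ...then CY8C95X0_DIRECTION (etc.) refers to that port only */
	ret = regmap_update_bits(chip->regmap, CY8C95X0_DIRECTION,
				 mask, value);

mutex_unlock(&chip->i2c_lock);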
+
+static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
+ unsigned long *val, unsigned long *mask)
+{
+ DECLARE_BITMAP(tmask, MAX_LINE);
+ DECLARE_BITMAP(tval, MAX_LINE);
+ DECLARE_BITMAP(tmp, MAX_LINE);
+ int read_val;
+ int ret = 0;
+ int i, off = 0;
+ u8 bits;
+
+ /* Add the 4 bit gap of Gport2 */
+ bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tmask, tmask, 4, MAX_LINE);
+ bitmap_replace(tmask, tmask, mask, chip->shiftmask, BANK_SZ * 3);
+
+ bitmap_andnot(tval, val, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tval, tval, 4, MAX_LINE);
+ bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
+
+ mutex_lock(&chip->i2c_lock);
+ for (i = 0; i < chip->nport; i++) {
+ /* Skip over unused banks */
+ bits = bitmap_get_value8(tmask, i * BANK_SZ);
+ if (!bits)
+ continue;
+
+ switch (reg) {
+ /* Muxed registers */
+ case CY8C95X0_INTMASK:
+ case CY8C95X0_PWMSEL:
+ case CY8C95X0_INVERT:
+ case CY8C95X0_DIRECTION:
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
+ if (ret < 0)
+ goto out;
+ off = reg;
+ break;
+ /* Direct access registers */
+ case CY8C95X0_INPUT:
+ case CY8C95X0_OUTPUT:
+ case CY8C95X0_INTSTATUS:
+ off = reg + i;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = regmap_read(chip->regmap, off, &read_val);
+ if (ret < 0)
+ goto out;
+
+ read_val &= bits;
+ read_val |= bitmap_get_value8(tval, i * BANK_SZ) & ~bits;
+ bitmap_set_value8(tval, read_val, i * BANK_SZ);
+ }
+
+ /* Fill the 4 bit gap of Gport2 */
+ bitmap_shift_right(tmp, tval, 4, MAX_LINE);
+ bitmap_replace(val, tmp, tval, chip->shiftmask, MAX_LINE);
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ if (ret < 0)
+ dev_err(chip->dev, "failed reading register %d: err %d\n", off, ret);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_direction_input(struct gpio_chip *gc, unsigned int off)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret)
+ goto out;
+
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, bit);
+ if (ret)
+ goto out;
+
+ if (test_bit(off, chip->push_pull)) {
+ /*
+		 * Disable driving the pin by forcing it to High-Z. Setting
+		 * the direction register alone isn't sufficient in push-pull
+		 * mode.
+ */
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DRV_HIZ, bit, bit);
+ if (ret)
+ goto out;
+
+ __clear_bit(off, chip->push_pull);
+ }
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int off, int val)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 port = cypress_get_port(chip, off);
+ u8 outreg = CY8C95X0_OUTPUT_(port);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ int ret;
+
+ /* Set output level */
+ ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ if (ret)
+ return ret;
+
+ mutex_lock(&chip->i2c_lock);
+ /* Select port... */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret)
+ goto out;
+
+ /* ...then direction */
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, 0);
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 inreg = CY8C95X0_INPUT_(cypress_get_port(chip, off));
+ u8 bit = cypress_get_pin_mask(chip, off);
+ u32 reg_val;
+ int ret;
+
+ ret = regmap_read(chip->regmap, inreg, &reg_val);
+ if (ret < 0) {
+ /*
+ * NOTE:
+ * Diagnostic already emitted; that's all we should
+ * do unless gpio_*_value_cansleep() calls become different
+ * from their nonsleeping siblings (and report faults).
+ */
+ return 0;
+ }
+
+ return !!(reg_val & bit);
+}
+
+static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 outreg = CY8C95X0_OUTPUT_(cypress_get_port(chip, off));
+ u8 bit = cypress_get_pin_mask(chip, off);
+
+ regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+}
+
+static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ u32 reg_val;
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(chip->regmap, CY8C95X0_DIRECTION, &reg_val);
+ if (ret < 0)
+ goto out;
+
+ mutex_unlock(&chip->i2c_lock);
+
+ if (reg_val & bit)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+out:
+ mutex_unlock(&chip->i2c_lock);
+ return ret;
+}
+
+static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
+ unsigned int off,
+ unsigned long *config)
+{
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ unsigned int reg;
+ u32 reg_val;
+ u16 arg = 0;
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+
+ /* Select port */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ reg = CY8C95X0_DRV_PU;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ reg = CY8C95X0_DRV_PD;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ reg = CY8C95X0_DRV_HIZ;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ reg = CY8C95X0_DRV_ODL;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ reg = CY8C95X0_DRV_ODH;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ reg = CY8C95X0_DRV_PP_FAST;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ reg = CY8C95X0_DIRECTION;
+ break;
+ case PIN_CONFIG_MODE_PWM:
+ reg = CY8C95X0_PWMSEL;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ reg = CY8C95X0_OUTPUT_(port);
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ reg = CY8C95X0_DIRECTION;
+ break;
+
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ case PIN_CONFIG_INPUT_SCHMITT:
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ case PIN_CONFIG_MODE_LOW_POWER:
+ case PIN_CONFIG_PERSIST_STATE:
+ case PIN_CONFIG_POWER_SOURCE:
+ case PIN_CONFIG_SKEW_DELAY:
+ case PIN_CONFIG_SLEEP_HARDWARE_STATE:
+ case PIN_CONFIG_SLEW_RATE:
+ default:
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ /*
+ * Writing 1 to one of the drive mode registers will automatically
+ * clear conflicting set bits in the other drive mode registers.
+ */
+	ret = regmap_read(chip->regmap, reg, &reg_val);
+	if (ret < 0)
+		goto out;
+
+	if (reg_val & bit)
+		arg = 1;
+
+ *config = pinconf_to_config_packed(param, (u16)arg);
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
+ unsigned int off,
+ unsigned long config)
+{
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ unsigned long param = pinconf_to_config_param(config);
+ unsigned int reg;
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+
+ /* Select port */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_PU;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_PD;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_HIZ;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_ODL;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_ODH;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ __set_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_PP_FAST;
+ break;
+ case PIN_CONFIG_MODE_PWM:
+ reg = CY8C95X0_PWMSEL;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ /*
+ * Writing 1 to one of the drive mode registers will automatically
+ * clear conflicting set bits in the other drive mode registers.
+ */
+ ret = regmap_write_bits(chip->regmap, reg, bit, bit);
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+ return ret;
+}
+
+static int cy8c95x0_gpio_get_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+
+ return cy8c95x0_read_regs_mask(chip, CY8C95X0_INPUT, bits, mask);
+}
+
+static void cy8c95x0_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+
+ cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask);
+}
+
+static int cy8c95x0_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ struct device *dev = chip->dev;
+ int ret;
+
+ ret = gpiochip_add_pin_range(gc, dev_name(dev), 0, 0, chip->tpin);
+ if (ret)
+ dev_err(dev, "failed to add GPIO pin range\n");
+
+ return ret;
+}
+
+static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
+{
+ struct gpio_chip *gc = &chip->gpio_chip;
+
+ gc->direction_input = cy8c95x0_gpio_direction_input;
+ gc->direction_output = cy8c95x0_gpio_direction_output;
+ gc->get = cy8c95x0_gpio_get_value;
+ gc->set = cy8c95x0_gpio_set_value;
+ gc->get_direction = cy8c95x0_gpio_get_direction;
+ gc->get_multiple = cy8c95x0_gpio_get_multiple;
+ gc->set_multiple = cy8c95x0_gpio_set_multiple;
+	gc->set_config = gpiochip_generic_config;
+ gc->can_sleep = true;
+ gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
+
+ gc->base = -1;
+ gc->ngpio = chip->tpin;
+
+ gc->parent = chip->dev;
+ gc->owner = THIS_MODULE;
+ gc->names = NULL;
+
+ gc->label = dev_name(chip->dev);
+
+ return devm_gpiochip_add_data(chip->dev, gc, chip);
+}
+
+static void cy8c95x0_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ set_bit(hwirq, chip->irq_mask);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void cy8c95x0_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ clear_bit(hwirq, chip->irq_mask);
+}
+
+static void cy8c95x0_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static void cy8c95x0_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ DECLARE_BITMAP(ones, MAX_LINE);
+ DECLARE_BITMAP(irq_mask, MAX_LINE);
+ DECLARE_BITMAP(reg_direction, MAX_LINE);
+
+ bitmap_fill(ones, MAX_LINE);
+
+ cy8c95x0_write_regs_mask(chip, CY8C95X0_INTMASK, chip->irq_mask, ones);
+
+ /* Switch direction to input if needed */
+ cy8c95x0_read_regs_mask(chip, CY8C95X0_DIRECTION, reg_direction, chip->irq_mask);
+ bitmap_or(irq_mask, chip->irq_mask, reg_direction, MAX_LINE);
+ bitmap_complement(irq_mask, irq_mask, MAX_LINE);
+
+ /* Look for any newly setup interrupt */
+ cy8c95x0_write_regs_mask(chip, CY8C95X0_DIRECTION, ones, irq_mask);
+
+ mutex_unlock(&chip->irq_lock);
+}
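Since every mask/unmask would otherwise cost an I2C transaction, the irqchip uses the standard slow-bus pattern: irq_mask()/irq_unmask() only flip bits in chip->irq_mask, irq_bus_lock() takes the mutex, and irq_bus_sync_unlock() above pushes the accumulated state out in one batch. In outline, with hypothetical foo_* helpers:

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_chip *chip = foo_from_irq_data(d);	/* hypothetical */

	set_bit(irqd_to_hwirq(d), chip->irq_mask);	/* no bus traffic */
}

static void foo_irq_bus_sync_unlock(struct irq_data *d)
{
	struct foo_chip *chip = foo_from_irq_data(d);	/* hypothetical */

	foo_flush_irq_mask(chip);	/* one batched I2C update */
	mutex_unlock(&chip->irq_lock);
}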
+
+static int cy8c95x0_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned int trig_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ trig_type = type;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ trig_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ trig_type = IRQ_TYPE_EDGE_FALLING;
+ break;
+ default:
+ dev_err(chip->dev, "irq %d: unsupported type %d\n", d->irq, type);
+ return -EINVAL;
+ }
+
+ assign_bit(hwirq, chip->irq_trig_fall, trig_type & IRQ_TYPE_EDGE_FALLING);
+ assign_bit(hwirq, chip->irq_trig_raise, trig_type & IRQ_TYPE_EDGE_RISING);
+ assign_bit(hwirq, chip->irq_trig_low, type == IRQ_TYPE_LEVEL_LOW);
+ assign_bit(hwirq, chip->irq_trig_high, type == IRQ_TYPE_LEVEL_HIGH);
+
+ return 0;
+}
+
+static void cy8c95x0_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ clear_bit(hwirq, chip->irq_trig_raise);
+ clear_bit(hwirq, chip->irq_trig_fall);
+ clear_bit(hwirq, chip->irq_trig_low);
+ clear_bit(hwirq, chip->irq_trig_high);
+}
+
+static const struct irq_chip cy8c95x0_irqchip = {
+ .name = "cy8c95x0-irq",
+ .irq_mask = cy8c95x0_irq_mask,
+ .irq_unmask = cy8c95x0_irq_unmask,
+ .irq_bus_lock = cy8c95x0_irq_bus_lock,
+ .irq_bus_sync_unlock = cy8c95x0_irq_bus_sync_unlock,
+ .irq_set_type = cy8c95x0_irq_set_type,
+ .irq_shutdown = cy8c95x0_irq_shutdown,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static bool cy8c95x0_irq_pending(struct cy8c95x0_pinctrl *chip, unsigned long *pending)
+{
+ DECLARE_BITMAP(ones, MAX_LINE);
+ DECLARE_BITMAP(cur_stat, MAX_LINE);
+ DECLARE_BITMAP(new_stat, MAX_LINE);
+ DECLARE_BITMAP(trigger, MAX_LINE);
+
+ bitmap_fill(ones, MAX_LINE);
+
+ /* Read the current interrupt status from the device */
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_INTSTATUS, trigger, ones))
+ return false;
+
+ /* Check latched inputs */
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_INPUT, cur_stat, trigger))
+ return false;
+
+ /* Apply filter for rising/falling edge selection */
+ bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise,
+ cur_stat, MAX_LINE);
+
+ bitmap_and(pending, new_stat, trigger, MAX_LINE);
+
+ return !bitmap_empty(pending, MAX_LINE);
+}
+
+static irqreturn_t cy8c95x0_irq_handler(int irq, void *devid)
+{
+ struct cy8c95x0_pinctrl *chip = devid;
+ struct gpio_chip *gc = &chip->gpio_chip;
+ DECLARE_BITMAP(pending, MAX_LINE);
+ int nested_irq, level;
+ bool ret;
+
+ ret = cy8c95x0_irq_pending(chip, pending);
+ if (!ret)
+ return IRQ_RETVAL(0);
+
+ ret = 0;
+ for_each_set_bit(level, pending, MAX_LINE) {
+		/* Already accounted for the 4-bit gap in Gport2 */
+ nested_irq = irq_find_mapping(gc->irq.domain, level);
+
+ if (unlikely(nested_irq <= 0)) {
+ dev_warn_ratelimited(gc->parent, "unmapped interrupt %d\n", level);
+ continue;
+ }
+
+ if (test_bit(level, chip->irq_trig_low))
+ while (!cy8c95x0_gpio_get_value(gc, level))
+ handle_nested_irq(nested_irq);
+ else if (test_bit(level, chip->irq_trig_high))
+ while (cy8c95x0_gpio_get_value(gc, level))
+ handle_nested_irq(nested_irq);
+ else
+ handle_nested_irq(nested_irq);
+
+ ret = 1;
+ }
+
+ return IRQ_RETVAL(ret);
+}
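The expander only latches edges, so level triggers are emulated in software: cy8c95x0_irq_set_type() maps IRQ_TYPE_LEVEL_HIGH onto a rising edge (and LEVEL_LOW onto falling), and the handler above keeps re-running the nested handler while the line still sits at the requested level, mimicking what a genuine level-triggered source would deliver:

if (test_bit(level, chip->irq_trig_low))
	/* re-fire until the line is released back high */
	while (!cy8c95x0_gpio_get_value(gc, level))
		handle_nested_irq(nested_irq);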
+
+static int cy8c95x0_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+
+ return chip->tpin;
+}
+
+static const char *cy8c95x0_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return cy8c95x0_groups[group];
+}
+
+static int cy8c95x0_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &cy8c9560_pins[group].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const char *cy8c95x0_get_fname(unsigned int selector)
+{
+ if (selector == 0)
+ return "gpio";
+ else
+ return "pwm";
+}
+
+static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int pin)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+ DECLARE_BITMAP(mask, MAX_LINE);
+ DECLARE_BITMAP(pwm, MAX_LINE);
+
+ bitmap_zero(mask, MAX_LINE);
+ __set_bit(pin, mask);
+
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
+ seq_puts(s, "not available");
+ return;
+ }
+
+ seq_printf(s, "MODE:%s", cy8c95x0_get_fname(test_bit(pin, pwm)));
+}
+
+static const struct pinctrl_ops cy8c95x0_pinctrl_ops = {
+ .get_groups_count = cy8c95x0_pinctrl_get_groups_count,
+ .get_group_name = cy8c95x0_pinctrl_get_group_name,
+ .get_group_pins = cy8c95x0_pinctrl_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+ .pin_dbg_show = cy8c95x0_pin_dbg_show,
+};
+
+static const char *cy8c95x0_get_function_name(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ return cy8c95x0_get_fname(selector);
+}
+
+static int cy8c95x0_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return 2;
+}
+
+static int cy8c95x0_get_function_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = cy8c95x0_groups;
+ *num_groups = chip->tpin;
+ return 0;
+}
+
+static int cy8c95x0_pinmux_cfg(struct cy8c95x0_pinctrl *chip,
+ unsigned int val,
+ unsigned long off)
+{
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ int ret;
+
+ /* Select port */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_PWMSEL, bit, val ? bit : 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set direction to output & set output to 1 so that PWM can work */
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, bit);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write_bits(chip->regmap, CY8C95X0_OUTPUT_(port), bit, bit);
+}
+
+static int cy8c95x0_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = cy8c95x0_pinmux_cfg(chip, selector, group);
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static const struct pinmux_ops cy8c95x0_pmxops = {
+ .get_functions_count = cy8c95x0_get_functions_count,
+ .get_function_name = cy8c95x0_get_function_name,
+ .get_function_groups = cy8c95x0_get_function_groups,
+ .set_mux = cy8c95x0_set_mux,
+ .strict = true,
+};
+
+static int cy8c95x0_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+
+ return cy8c95x0_gpio_get_pincfg(chip, pin, config);
+}
+
+static int cy8c95x0_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ ret = cy8c95x0_gpio_set_pincfg(chip, pin, configs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct pinconf_ops cy8c95x0_pinconf_ops = {
+ .pin_config_get = cy8c95x0_pinconf_get,
+ .pin_config_set = cy8c95x0_pinconf_set,
+ .is_generic = true,
+};
+
+static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
+{
+ struct gpio_irq_chip *girq = &chip->gpio_chip.irq;
+ DECLARE_BITMAP(pending_irqs, MAX_LINE);
+ int ret;
+
+ mutex_init(&chip->irq_lock);
+
+ bitmap_zero(pending_irqs, MAX_LINE);
+
+	/*
+	 * Read the IRQ status registers once to clear anything already
+	 * pending; a non-empty result here is expected, not an error.
+	 */
+	cy8c95x0_irq_pending(chip, pending_irqs);
+
+ /* Mask all interrupts */
+ bitmap_fill(chip->irq_mask, MAX_LINE);
+
+ gpio_irq_chip_set_chip(girq, &cy8c95x0_irqchip);
+
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ ret = devm_request_threaded_irq(chip->dev, irq,
+ NULL, cy8c95x0_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ dev_name(chip->dev), chip);
+ if (ret) {
+ dev_err(chip->dev, "failed to request irq %d\n", irq);
+ return ret;
+ }
+ dev_info(chip->dev, "Registered threaded IRQ\n");
+
+ return 0;
+}
+
+static int cy8c95x0_setup_pinctrl(struct cy8c95x0_pinctrl *chip)
+{
+ struct pinctrl_desc *pd = &chip->pinctrl_desc;
+
+ pd->pctlops = &cy8c95x0_pinctrl_ops;
+ pd->confops = &cy8c95x0_pinconf_ops;
+ pd->pmxops = &cy8c95x0_pmxops;
+ pd->name = dev_name(chip->dev);
+ pd->pins = cy8c9560_pins;
+ pd->npins = chip->tpin;
+ pd->owner = THIS_MODULE;
+
+ chip->pctldev = devm_pinctrl_register(chip->dev, pd, chip);
+ if (IS_ERR(chip->pctldev))
+ return dev_err_probe(chip->dev, PTR_ERR(chip->pctldev),
+ "can't register controller\n");
+
+ return 0;
+}
+
+static int cy8c95x0_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int ret;
+ const char *name;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, CY8C95X0_DEVID);
+ if (ret < 0)
+ return ret;
+ switch (ret & GENMASK(7, 4)) {
+ case 0x20:
+ name = cy8c95x0_id[0].name;
+ break;
+ case 0x40:
+ name = cy8c95x0_id[1].name;
+ break;
+ case 0x60:
+ name = cy8c95x0_id[2].name;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "Found a %s chip at 0x%02x.\n", name, client->addr);
+ strscpy(info->type, name, I2C_NAME_SIZE);
+
+ return 0;
+}
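Detection keys off the upper nibble of the DEVID register: 0x2X, 0x4X and 0x6X select the 20-, 40- and 60-pin parts, matching the three cy8c95x0_id[] entries in order. A worked example:

int devid = 0x45;			/* example read-back */
int family = devid & GENMASK(7, 4);	/* 0x40 -> "cy8c9540" */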
+
+static int cy8c95x0_probe(struct i2c_client *client)
+{
+ struct cy8c95x0_pinctrl *chip;
+ struct regulator *reg;
+ int ret;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &client->dev;
+
+ /* Set the device type */
+ chip->driver_data = (unsigned long)device_get_match_data(&client->dev);
+ if (!chip->driver_data)
+ chip->driver_data = i2c_match_id(cy8c95x0_id, client)->driver_data;
+ if (!chip->driver_data)
+ return -ENODEV;
+
+ i2c_set_clientdata(client, chip);
+
+ chip->tpin = chip->driver_data & CY8C95X0_GPIO_MASK;
+ chip->nport = DIV_ROUND_UP(CY8C95X0_PIN_TO_OFFSET(chip->tpin), BANK_SZ);
+
+ switch (chip->tpin) {
+ case 20:
+ strscpy(chip->name, cy8c95x0_id[0].name, I2C_NAME_SIZE);
+ break;
+ case 40:
+ strscpy(chip->name, cy8c95x0_id[1].name, I2C_NAME_SIZE);
+ break;
+ case 60:
+ strscpy(chip->name, cy8c95x0_id[2].name, I2C_NAME_SIZE);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(reg)) {
+ if (PTR_ERR(reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
+ ret = regulator_enable(reg);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable regulator vdd: %d\n", ret);
+ return ret;
+ }
+ chip->regulator = reg;
+ }
+
+ chip->regmap = devm_regmap_init_i2c(client, &cy8c95x0_i2c_regmap);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ goto err_exit;
+ }
+
+ bitmap_zero(chip->push_pull, MAX_LINE);
+ bitmap_zero(chip->shiftmask, MAX_LINE);
+ bitmap_set(chip->shiftmask, 0, 20);
+ mutex_init(&chip->i2c_lock);
+
+ if (dmi_first_match(cy8c95x0_dmi_acpi_irq_info)) {
+ ret = cy8c95x0_acpi_get_irq(&client->dev);
+ if (ret > 0)
+ client->irq = ret;
+ }
+
+ if (client->irq) {
+ ret = cy8c95x0_irq_setup(chip, client->irq);
+ if (ret)
+ goto err_exit;
+ }
+
+ ret = cy8c95x0_setup_pinctrl(chip);
+ if (ret)
+ goto err_exit;
+
+ ret = cy8c95x0_setup_gpiochip(chip);
+ if (ret)
+ goto err_exit;
+
+ return 0;
+
+err_exit:
+ if (!IS_ERR_OR_NULL(chip->regulator))
+ regulator_disable(chip->regulator);
+ return ret;
+}
+
+static void cy8c95x0_remove(struct i2c_client *client)
+{
+ struct cy8c95x0_pinctrl *chip = i2c_get_clientdata(client);
+
+ if (!IS_ERR_OR_NULL(chip->regulator))
+ regulator_disable(chip->regulator);
+}
+
+static const struct acpi_device_id cy8c95x0_acpi_ids[] = {
+ { "INT3490", 40, },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cy8c95x0_acpi_ids);
+
+static struct i2c_driver cy8c95x0_driver = {
+ .driver = {
+ .name = "cy8c95x0-pinctrl",
+ .of_match_table = cy8c95x0_dt_ids,
+ .acpi_match_table = cy8c95x0_acpi_ids,
+ },
+ .probe_new = cy8c95x0_probe,
+ .remove = cy8c95x0_remove,
+ .id_table = cy8c95x0_id,
+ .detect = cy8c95x0_detect,
+};
+module_i2c_driver(cy8c95x0_driver);
+
+MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>");
+MODULE_AUTHOR("Naresh Solanki <naresh.solanki@9elements.com>");
+MODULE_DESCRIPTION("Pinctrl driver for CY8C95X0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 3a9ee9c8af11..7e732076dedf 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -12,14 +12,14 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -4152,7 +4152,7 @@ static const struct of_device_id ingenic_gpio_of_matches[] __initconst = {
};
static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
- struct device_node *node)
+ struct fwnode_handle *fwnode)
{
struct ingenic_gpio_chip *jzgc;
struct device *dev = jzpc->dev;
@@ -4160,7 +4160,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
unsigned int bank;
int err;
- err = of_property_read_u32(node, "reg", &bank);
+ err = fwnode_property_read_u32(fwnode, "reg", &bank);
if (err) {
dev_err(dev, "Cannot read \"reg\" property: %i\n", err);
return err;
@@ -4185,7 +4185,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.ngpio = 32;
jzgc->gc.parent = dev;
- jzgc->gc.of_node = node;
+ jzgc->gc.fwnode = fwnode;
jzgc->gc.owner = THIS_MODULE;
jzgc->gc.set = ingenic_gpio_set;
@@ -4196,9 +4196,12 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.request = gpiochip_generic_request;
jzgc->gc.free = gpiochip_generic_free;
- jzgc->irq = irq_of_parse_and_map(node, 0);
- if (!jzgc->irq)
+ err = fwnode_irq_get(fwnode, 0);
+ if (err < 0)
+ return err;
+ if (!err)
return -EINVAL;
+ jzgc->irq = err;
girq = &jzgc->gc.irq;
gpio_irq_chip_set_chip(girq, &ingenic_gpio_irqchip);
@@ -4227,12 +4230,12 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
struct pinctrl_desc *pctl_desc;
void __iomem *base;
const struct ingenic_chip_info *chip_info;
- struct device_node *node;
struct regmap_config regmap_config;
+ struct fwnode_handle *fwnode;
unsigned int i;
int err;
- chip_info = of_device_get_match_data(dev);
+ chip_info = device_get_match_data(dev);
if (!chip_info) {
dev_err(dev, "Unsupported SoC\n");
return -EINVAL;
@@ -4319,11 +4322,11 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
dev_set_drvdata(dev, jzpc->map);
- for_each_child_of_node(dev->of_node, node) {
- if (of_match_node(ingenic_gpio_of_matches, node)) {
- err = ingenic_gpio_probe(jzpc, node);
+ device_for_each_child_node(dev, fwnode) {
+ if (of_match_node(ingenic_gpio_of_matches, to_of_node(fwnode))) {
+ err = ingenic_gpio_probe(jzpc, fwnode);
if (err) {
- of_node_put(node);
+ fwnode_handle_put(fwnode);
return err;
}
}
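The ingenic conversion swaps the OF-only helpers for their fwnode equivalents: device_get_match_data() replaces of_device_get_match_data(), gc.fwnode replaces gc.of_node, and device_for_each_child_node() replaces for_each_child_of_node(), keeping the rule that the reference held by the iterator must be dropped on an early exit. The generic shape, assuming a hypothetical probe_one():

struct fwnode_handle *child;
int err;

device_for_each_child_node(dev, child) {
	err = probe_one(dev, child);		/* hypothetical */
	if (err) {
		/* iterator holds a ref on child; drop it when bailing out */
		fwnode_handle_put(child);
		return err;
	}
}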
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 695236636d05..5f356edfd0fd 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -549,9 +549,6 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.get = mcp23s08_get;
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
-#ifdef CONFIG_OF_GPIO
- mcp->chip.of_gpio_n_cells = 2;
-#endif
mcp->chip.base = base;
mcp->chip.can_sleep = true;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 2b4167a09b3b..af27b72c8958 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -865,9 +865,10 @@ static int microchip_sgpio_register_bank(struct device *dev,
gc->can_sleep = !bank->is_input;
if (bank->is_input && priv->properties->flags & SGPIO_FLAGS_HAS_IRQ) {
- int irq = fwnode_irq_get(fwnode, 0);
+ int irq;
- if (irq) {
+ irq = fwnode_irq_get(fwnode, 0);
+ if (irq > 0) {
struct gpio_irq_chip *girq = &gc->irq;
gpio_irq_chip_set_chip(girq, &microchip_sgpio_irqchip);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 83464e0bf4e6..62ce3957abe4 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -2129,4 +2129,6 @@ static struct platform_driver ocelot_pinctrl_driver = {
.remove = ocelot_pinctrl_remove,
};
module_platform_driver(ocelot_pinctrl_driver);
+
+MODULE_DESCRIPTION("Ocelot Chip Pinctrl Driver");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 5de691c630b4..7ca4ecb6eb8d 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -10,13 +10,13 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -1347,46 +1347,51 @@ static struct pistachio_gpio_bank pistachio_gpio_banks[] = {
static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
{
- struct device_node *node = pctl->dev->of_node;
struct pistachio_gpio_bank *bank;
unsigned int i;
int irq, ret = 0;
for (i = 0; i < pctl->nbanks; i++) {
char child_name[sizeof("gpioXX")];
- struct device_node *child;
+ struct fwnode_handle *child;
struct gpio_irq_chip *girq;
snprintf(child_name, sizeof(child_name), "gpio%d", i);
- child = of_get_child_by_name(node, child_name);
+ child = device_get_named_child_node(pctl->dev, child_name);
if (!child) {
dev_err(pctl->dev, "No node for bank %u\n", i);
ret = -ENODEV;
goto err;
}
- if (!of_find_property(child, "gpio-controller", NULL)) {
+ if (!fwnode_property_present(child, "gpio-controller")) {
+ fwnode_handle_put(child);
dev_err(pctl->dev,
"No gpio-controller property for bank %u\n", i);
- of_node_put(child);
ret = -ENODEV;
goto err;
}
- irq = irq_of_parse_and_map(child, 0);
- if (!irq) {
+ ret = fwnode_irq_get(child, 0);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ dev_err(pctl->dev, "Failed to retrieve IRQ for bank %u\n", i);
+ goto err;
+ }
+ if (!ret) {
+ fwnode_handle_put(child);
dev_err(pctl->dev, "No IRQ for bank %u\n", i);
- of_node_put(child);
ret = -EINVAL;
goto err;
}
+ irq = ret;
bank = &pctl->gpio_banks[i];
bank->pctl = pctl;
bank->base = pctl->base + GPIO_BANK_BASE(i);
bank->gpio_chip.parent = pctl->dev;
- bank->gpio_chip.of_node = child;
+ bank->gpio_chip.fwnode = child;
girq = &bank->gpio_chip.irq;
girq->chip = &bank->irq_chip;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 32e41395fc76..53bdfc40f055 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -57,6 +57,7 @@
#define IOMUX_UNROUTED BIT(3)
#define IOMUX_WIDTH_3BIT BIT(4)
#define IOMUX_WIDTH_2BIT BIT(5)
+#define IOMUX_L_SOURCE_PMU BIT(6)
#define PIN_BANK(id, pins, label) \
{ \
@@ -147,6 +148,21 @@
.pull_type[3] = pull3, \
}
+#define PIN_BANK_IOMUX_FLAGS_OFFSET(id, pins, label, iom0, iom1, iom2, \
+ iom3, offset0, offset1, offset2, \
+ offset3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = offset0 }, \
+ { .type = iom1, .offset = offset1 }, \
+ { .type = iom2, .offset = offset2 }, \
+ { .type = iom3, .offset = offset3 }, \
+ }, \
+ }
+
#define PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(id, pins, label, iom0, iom1, \
iom2, iom3, drv0, drv1, drv2, \
drv3, offset0, offset1, \
@@ -443,6 +459,37 @@ static struct rockchip_mux_recalced_data rv1108_mux_recalced_data[] = {
},
};
+static struct rockchip_mux_recalced_data rv1126_mux_recalced_data[] = {
+ {
+ .num = 0,
+ .pin = 20,
+ .reg = 0x10000,
+ .bit = 0,
+ .mask = 0xf
+ },
+ {
+ .num = 0,
+ .pin = 21,
+ .reg = 0x10000,
+ .bit = 4,
+ .mask = 0xf
+ },
+ {
+ .num = 0,
+ .pin = 22,
+ .reg = 0x10000,
+ .bit = 8,
+ .mask = 0xf
+ },
+ {
+ .num = 0,
+ .pin = 23,
+ .reg = 0x10000,
+ .bit = 12,
+ .mask = 0xf
+ },
+};
+
static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
{
.num = 2,
@@ -642,6 +689,103 @@ static struct rockchip_mux_route_data px30_mux_route_data[] = {
RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */
};
+static struct rockchip_mux_route_data rv1126_mux_route_data[] = {
+ RK_MUXROUTE_GRF(3, RK_PD2, 1, 0x10260, WRITE_MASK_VAL(0, 0, 0)), /* I2S0_MCLK_M0 */
+ RK_MUXROUTE_GRF(3, RK_PB0, 3, 0x10260, WRITE_MASK_VAL(0, 0, 1)), /* I2S0_MCLK_M1 */
+
+ RK_MUXROUTE_GRF(0, RK_PD4, 4, 0x10260, WRITE_MASK_VAL(3, 2, 0)), /* I2S1_MCLK_M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x10260, WRITE_MASK_VAL(3, 2, 1)), /* I2S1_MCLK_M1 */
+ RK_MUXROUTE_GRF(2, RK_PC7, 6, 0x10260, WRITE_MASK_VAL(3, 2, 2)), /* I2S1_MCLK_M2 */
+
+ RK_MUXROUTE_GRF(1, RK_PD0, 1, 0x10260, WRITE_MASK_VAL(4, 4, 0)), /* I2S2_MCLK_M0 */
+ RK_MUXROUTE_GRF(2, RK_PB3, 2, 0x10260, WRITE_MASK_VAL(4, 4, 1)), /* I2S2_MCLK_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PD4, 2, 0x10260, WRITE_MASK_VAL(12, 12, 0)), /* PDM_CLK0_M0 */
+ RK_MUXROUTE_GRF(3, RK_PC0, 3, 0x10260, WRITE_MASK_VAL(12, 12, 1)), /* PDM_CLK0_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PC6, 1, 0x10264, WRITE_MASK_VAL(0, 0, 0)), /* CIF_CLKOUT_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD1, 3, 0x10264, WRITE_MASK_VAL(0, 0, 1)), /* CIF_CLKOUT_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA4, 5, 0x10264, WRITE_MASK_VAL(5, 4, 0)), /* I2C3_SCL_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD4, 7, 0x10264, WRITE_MASK_VAL(5, 4, 1)), /* I2C3_SCL_M1 */
+ RK_MUXROUTE_GRF(1, RK_PD6, 3, 0x10264, WRITE_MASK_VAL(5, 4, 2)), /* I2C3_SCL_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PA0, 7, 0x10264, WRITE_MASK_VAL(6, 6, 0)), /* I2C4_SCL_M0 */
+ RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x10264, WRITE_MASK_VAL(6, 6, 1)), /* I2C4_SCL_M1 */
+
+ RK_MUXROUTE_GRF(2, RK_PA5, 7, 0x10264, WRITE_MASK_VAL(9, 8, 0)), /* I2C5_SCL_M0 */
+ RK_MUXROUTE_GRF(3, RK_PB0, 5, 0x10264, WRITE_MASK_VAL(9, 8, 1)), /* I2C5_SCL_M1 */
+ RK_MUXROUTE_GRF(1, RK_PD0, 4, 0x10264, WRITE_MASK_VAL(9, 8, 2)), /* I2C5_SCL_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PC0, 5, 0x10264, WRITE_MASK_VAL(11, 10, 0)), /* SPI1_CLK_M0 */
+ RK_MUXROUTE_GRF(1, RK_PC6, 3, 0x10264, WRITE_MASK_VAL(11, 10, 1)), /* SPI1_CLK_M1 */
+ RK_MUXROUTE_GRF(2, RK_PD5, 6, 0x10264, WRITE_MASK_VAL(11, 10, 2)), /* SPI1_CLK_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PC0, 2, 0x10264, WRITE_MASK_VAL(12, 12, 0)), /* RGMII_CLK_M0 */
+ RK_MUXROUTE_GRF(2, RK_PB7, 2, 0x10264, WRITE_MASK_VAL(12, 12, 1)), /* RGMII_CLK_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA1, 3, 0x10264, WRITE_MASK_VAL(13, 13, 0)), /* CAN_TXD_M0 */
+ RK_MUXROUTE_GRF(3, RK_PA7, 5, 0x10264, WRITE_MASK_VAL(13, 13, 1)), /* CAN_TXD_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA4, 6, 0x10268, WRITE_MASK_VAL(0, 0, 0)), /* PWM8_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD7, 5, 0x10268, WRITE_MASK_VAL(0, 0, 1)), /* PWM8_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA5, 6, 0x10268, WRITE_MASK_VAL(2, 2, 0)), /* PWM9_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD6, 5, 0x10268, WRITE_MASK_VAL(2, 2, 1)), /* PWM9_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA6, 6, 0x10268, WRITE_MASK_VAL(4, 4, 0)), /* PWM10_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD5, 5, 0x10268, WRITE_MASK_VAL(4, 4, 1)), /* PWM10_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA7, 6, 0x10268, WRITE_MASK_VAL(6, 6, 0)), /* PWM11_IR_M0 */
+ RK_MUXROUTE_GRF(3, RK_PA1, 5, 0x10268, WRITE_MASK_VAL(6, 6, 1)), /* PWM11_IR_M1 */
+
+ RK_MUXROUTE_GRF(1, RK_PA5, 3, 0x10268, WRITE_MASK_VAL(8, 8, 0)), /* UART2_TX_M0 */
+ RK_MUXROUTE_GRF(3, RK_PA2, 1, 0x10268, WRITE_MASK_VAL(8, 8, 1)), /* UART2_TX_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PC6, 3, 0x10268, WRITE_MASK_VAL(11, 10, 0)), /* UART3_TX_M0 */
+ RK_MUXROUTE_GRF(1, RK_PA7, 2, 0x10268, WRITE_MASK_VAL(11, 10, 1)), /* UART3_TX_M1 */
+ RK_MUXROUTE_GRF(3, RK_PA0, 4, 0x10268, WRITE_MASK_VAL(11, 10, 2)), /* UART3_TX_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PA4, 4, 0x10268, WRITE_MASK_VAL(13, 12, 0)), /* UART4_TX_M0 */
+ RK_MUXROUTE_GRF(2, RK_PA6, 4, 0x10268, WRITE_MASK_VAL(13, 12, 1)), /* UART4_TX_M1 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 3, 0x10268, WRITE_MASK_VAL(13, 12, 2)), /* UART4_TX_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PA6, 4, 0x10268, WRITE_MASK_VAL(15, 14, 0)), /* UART5_TX_M0 */
+ RK_MUXROUTE_GRF(2, RK_PB0, 4, 0x10268, WRITE_MASK_VAL(15, 14, 1)), /* UART5_TX_M1 */
+ RK_MUXROUTE_GRF(2, RK_PA0, 3, 0x10268, WRITE_MASK_VAL(15, 14, 2)), /* UART5_TX_M2 */
+
+ RK_MUXROUTE_PMU(0, RK_PB6, 3, 0x0114, WRITE_MASK_VAL(0, 0, 0)), /* PWM0_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB3, 5, 0x0114, WRITE_MASK_VAL(0, 0, 1)), /* PWM0_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB7, 3, 0x0114, WRITE_MASK_VAL(2, 2, 0)), /* PWM1_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB2, 5, 0x0114, WRITE_MASK_VAL(2, 2, 1)), /* PWM1_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC0, 3, 0x0114, WRITE_MASK_VAL(4, 4, 0)), /* PWM2_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB1, 5, 0x0114, WRITE_MASK_VAL(4, 4, 1)), /* PWM2_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC1, 3, 0x0114, WRITE_MASK_VAL(6, 6, 0)), /* PWM3_IR_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB0, 5, 0x0114, WRITE_MASK_VAL(6, 6, 1)), /* PWM3_IR_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC2, 3, 0x0114, WRITE_MASK_VAL(8, 8, 0)), /* PWM4_M0 */
+ RK_MUXROUTE_PMU(2, RK_PA7, 5, 0x0114, WRITE_MASK_VAL(8, 8, 1)), /* PWM4_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC3, 3, 0x0114, WRITE_MASK_VAL(10, 10, 0)), /* PWM5_M0 */
+ RK_MUXROUTE_PMU(2, RK_PA6, 5, 0x0114, WRITE_MASK_VAL(10, 10, 1)), /* PWM5_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB2, 3, 0x0114, WRITE_MASK_VAL(12, 12, 0)), /* PWM6_M0 */
+ RK_MUXROUTE_PMU(2, RK_PD4, 5, 0x0114, WRITE_MASK_VAL(12, 12, 1)), /* PWM6_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB1, 3, 0x0114, WRITE_MASK_VAL(14, 14, 0)), /* PWM7_IR_M0 */
+ RK_MUXROUTE_PMU(3, RK_PA0, 5, 0x0114, WRITE_MASK_VAL(14, 14, 1)), /* PWM7_IR_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB0, 1, 0x0118, WRITE_MASK_VAL(1, 0, 0)), /* SPI0_CLK_M0 */
+ RK_MUXROUTE_PMU(2, RK_PA1, 1, 0x0118, WRITE_MASK_VAL(1, 0, 1)), /* SPI0_CLK_M1 */
+ RK_MUXROUTE_PMU(2, RK_PB2, 6, 0x0118, WRITE_MASK_VAL(1, 0, 2)), /* SPI0_CLK_M2 */
+
+ RK_MUXROUTE_PMU(0, RK_PB6, 2, 0x0118, WRITE_MASK_VAL(2, 2, 0)), /* UART1_TX_M0 */
+ RK_MUXROUTE_PMU(1, RK_PD0, 5, 0x0118, WRITE_MASK_VAL(2, 2, 1)), /* UART1_TX_M1 */
+};
+
static struct rockchip_mux_route_data rk3128_mux_route_data[] = {
RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x144, BIT(16 + 3) | BIT(16 + 4)), /* spi-0 */
RK_MUXROUTE_SAME(1, RK_PD3, 3, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(3)), /* spi-1 */
@@ -877,8 +1021,12 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY)
return RK_FUNC_GPIO;
- regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
- ? info->regmap_pmu : info->regmap_base;
+ if (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+ regmap = info->regmap_pmu;
+ else if (bank->iomux[iomux_num].type & IOMUX_L_SOURCE_PMU)
+ regmap = (pin % 8 < 4) ? info->regmap_pmu : info->regmap_base;
+ else
+ regmap = info->regmap_base;
/* get basic quadruple of mux registers and the correct reg inside */
mux_type = bank->iomux[iomux_num].type;
@@ -987,8 +1135,12 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
dev_dbg(dev, "setting mux of GPIO%d-%d to %d\n", bank->bank_num, pin, mux);
- regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
- ? info->regmap_pmu : info->regmap_base;
+ if (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+ regmap = info->regmap_pmu;
+ else if (bank->iomux[iomux_num].type & IOMUX_L_SOURCE_PMU)
+ regmap = (pin % 8 < 4) ? info->regmap_pmu : info->regmap_base;
+ else
+ regmap = info->regmap_base;
/* get basic quadruple of mux registers and the correct reg inside */
mux_type = bank->iomux[iomux_num].type;
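The new IOMUX_L_SOURCE_PMU flag covers iomux groups that are split across register files: the low four pins of an 8-pin group sit in the PMU GRF and the high four in the system GRF, which is what the repeated `pin % 8 < 4` test selects. The RV1126 bank-0 table later in this diff is the only user so far. A concrete illustration:

	/* Illustrative: pin 18 (18 % 8 == 2, which is < 4) selects the PMU
	 * regmap, while pin 22 (22 % 8 == 6) falls back to the GRF regmap.
	 */
	regmap = (pin % 8 < 4) ? info->regmap_pmu : info->regmap_base;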
@@ -1268,6 +1420,119 @@ static int rv1108_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RV1126_PULL_PMU_OFFSET 0x40
+#define RV1126_PULL_GRF_GPIO1A0_OFFSET 0x10108
+#define RV1126_PULL_PINS_PER_REG 8
+#define RV1126_PULL_BITS_PER_PIN 2
+#define RV1126_PULL_BANK_STRIDE 16
+#define RV1126_GPIO_C4_D7(p) ((p) >= 20 && (p) <= 31) /* GPIO0_C4 ~ GPIO0_D7 */
+
+static int rv1126_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 20 pins (GPIO0_A0 ~ GPIO0_C3) of bank 0 are located in PMU */
+ if (bank->bank_num == 0) {
+ if (RV1126_GPIO_C4_D7(pin_num)) {
+ *regmap = info->regmap_base;
+ *reg = RV1126_PULL_GRF_GPIO1A0_OFFSET;
+ *reg -= (((31 - pin_num) / RV1126_PULL_PINS_PER_REG + 1) * 4);
+ *bit = pin_num % RV1126_PULL_PINS_PER_REG;
+ *bit *= RV1126_PULL_BITS_PER_PIN;
+ return 0;
+ }
+ *regmap = info->regmap_pmu;
+ *reg = RV1126_PULL_PMU_OFFSET;
+ } else {
+ *reg = RV1126_PULL_GRF_GPIO1A0_OFFSET;
+ *regmap = info->regmap_base;
+ *reg += (bank->bank_num - 1) * RV1126_PULL_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RV1126_PULL_PINS_PER_REG) * 4);
+ *bit = (pin_num % RV1126_PULL_PINS_PER_REG);
+ *bit *= RV1126_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
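To make the pull offset arithmetic concrete, a worked example under the GRF branch (the pin choice is illustrative):

	/*
	 * Worked example (illustrative pin): GPIO1_B3 is bank 1, pin 11.
	 * reg = 0x10108 + (1 - 1) * RV1126_PULL_BANK_STRIDE
	 *	 + (11 / 8) * 4			= 0x1010c
	 * bit = (11 % 8) * RV1126_PULL_BITS_PER_PIN	= 6
	 */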
+#define RV1126_DRV_PMU_OFFSET 0x20
+#define RV1126_DRV_GRF_GPIO1A0_OFFSET 0x10090
+#define RV1126_DRV_BITS_PER_PIN 4
+#define RV1126_DRV_PINS_PER_REG 4
+#define RV1126_DRV_BANK_STRIDE 32
+
+static int rv1126_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 20 pins (GPIO0_A0 ~ GPIO0_C3) of bank 0 are located in PMU */
+ if (bank->bank_num == 0) {
+ if (RV1126_GPIO_C4_D7(pin_num)) {
+ *regmap = info->regmap_base;
+ *reg = RV1126_DRV_GRF_GPIO1A0_OFFSET;
+ *reg -= (((31 - pin_num) / RV1126_DRV_PINS_PER_REG + 1) * 4);
+ *reg -= 0x4;
+ *bit = pin_num % RV1126_DRV_PINS_PER_REG;
+ *bit *= RV1126_DRV_BITS_PER_PIN;
+ return 0;
+ }
+ *regmap = info->regmap_pmu;
+ *reg = RV1126_DRV_PMU_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RV1126_DRV_GRF_GPIO1A0_OFFSET;
+ *reg += (bank->bank_num - 1) * RV1126_DRV_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RV1126_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RV1126_DRV_PINS_PER_REG;
+ *bit *= RV1126_DRV_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RV1126_SCHMITT_PMU_OFFSET 0x60
+#define RV1126_SCHMITT_GRF_GPIO1A0_OFFSET 0x10188
+#define RV1126_SCHMITT_BANK_STRIDE 16
+#define RV1126_SCHMITT_PINS_PER_GRF_REG 8
+#define RV1126_SCHMITT_PINS_PER_PMU_REG 8
+
+static int rv1126_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ int pins_per_reg;
+
+ if (bank->bank_num == 0) {
+ if (RV1126_GPIO_C4_D7(pin_num)) {
+ *regmap = info->regmap_base;
+ *reg = RV1126_SCHMITT_GRF_GPIO1A0_OFFSET;
+ *reg -= (((31 - pin_num) / RV1126_SCHMITT_PINS_PER_GRF_REG + 1) * 4);
+ *bit = pin_num % RV1126_SCHMITT_PINS_PER_GRF_REG;
+ return 0;
+ }
+ *regmap = info->regmap_pmu;
+ *reg = RV1126_SCHMITT_PMU_OFFSET;
+ pins_per_reg = RV1126_SCHMITT_PINS_PER_PMU_REG;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RV1126_SCHMITT_GRF_GPIO1A0_OFFSET;
+ pins_per_reg = RV1126_SCHMITT_PINS_PER_GRF_REG;
+ *reg += (bank->bank_num - 1) * RV1126_SCHMITT_BANK_STRIDE;
+ }
+ *reg += ((pin_num / pins_per_reg) * 4);
+ *bit = pin_num % pins_per_reg;
+
+ return 0;
+}
+
#define RK3308_SCHMITT_PINS_PER_REG 8
#define RK3308_SCHMITT_BANK_STRIDE 16
#define RK3308_SCHMITT_GRF_OFFSET 0x1a0
@@ -1998,6 +2263,12 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
goto config;
}
+ if (ctrl->type == RV1126) {
+ rmask_bits = RV1126_DRV_BITS_PER_PIN;
+ ret = strength;
+ goto config;
+ }
+
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list[drv_type]); i++) {
if (rockchip_perpin_drv_list[drv_type][i] == strength) {
@@ -2168,6 +2439,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
break;
case PX30:
case RV1108:
+ case RV1126:
case RK3188:
case RK3288:
case RK3308:
@@ -2393,11 +2665,24 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
+static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset,
+ bool input)
+{
+ struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct rockchip_pin_bank *bank;
+
+ bank = pin_to_bank(info, offset);
+ return rockchip_set_mux(bank, offset - bank->pin_base, RK_FUNC_GPIO);
+}
+
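Implementing .gpio_set_direction means gpiolib direction changes now force the pad back to its GPIO function instead of relying on the boot-time mux. Assuming the companion GPIO driver routes direction changes through pinctrl (as gpio-rockchip does), a consumer-side sketch with illustrative names:

	/* Consumer-side sketch (illustrative consumer and con_id): any
	 * direction change ends up in rockchip_pmx_gpio_set_direction(),
	 * which re-muxes the pad to RK_FUNC_GPIO.
	 */
	struct gpio_desc *gd = gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	if (IS_ERR(gd))
		return PTR_ERR(gd);
	gpiod_direction_input(gd);	/* triggers the new pinmux hook */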
static const struct pinmux_ops rockchip_pmx_ops = {
.get_functions_count = rockchip_pmx_get_funcs_count,
.get_function_name = rockchip_pmx_get_func_name,
.get_function_groups = rockchip_pmx_get_groups,
.set_mux = rockchip_pmx_set,
+ .gpio_set_direction = rockchip_pmx_gpio_set_direction,
};
/*
@@ -2416,6 +2701,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
return pull ? false : true;
case PX30:
case RV1108:
+ case RV1126:
case RK3188:
case RK3288:
case RK3308:
@@ -2889,12 +3175,14 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
/* preset iomux offset value, set new start value */
if (iom->offset >= 0) {
- if (iom->type & IOMUX_SOURCE_PMU)
+ if ((iom->type & IOMUX_SOURCE_PMU) ||
+ (iom->type & IOMUX_L_SOURCE_PMU))
pmu_offs = iom->offset;
else
grf_offs = iom->offset;
} else { /* set current iomux offset */
- iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
+ iom->offset = ((iom->type & IOMUX_SOURCE_PMU) ||
+ (iom->type & IOMUX_L_SOURCE_PMU)) ?
pmu_offs : grf_offs;
}
@@ -2919,7 +3207,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
inc = (iom->type & (IOMUX_WIDTH_4BIT |
IOMUX_WIDTH_3BIT |
IOMUX_WIDTH_2BIT)) ? 8 : 4;
- if (iom->type & IOMUX_SOURCE_PMU)
+ if ((iom->type & IOMUX_SOURCE_PMU) || (iom->type & IOMUX_L_SOURCE_PMU))
pmu_offs += inc;
else
grf_offs += inc;
@@ -3178,6 +3466,48 @@ static struct rockchip_pin_ctrl rv1108_pin_ctrl = {
.schmitt_calc_reg = rv1108_calc_schmitt_reg_and_bit,
};
+static struct rockchip_pin_bank rv1126_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0",
+ IOMUX_WIDTH_4BIT | IOMUX_SOURCE_PMU,
+ IOMUX_WIDTH_4BIT | IOMUX_SOURCE_PMU,
+ IOMUX_WIDTH_4BIT | IOMUX_L_SOURCE_PMU,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(1, 32, "gpio1",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x10010, 0x10018, 0x10020, 0x10028),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 2, "gpio4",
+ IOMUX_WIDTH_4BIT, 0, 0, 0),
+};
+
+static struct rockchip_pin_ctrl rv1126_pin_ctrl = {
+ .pin_banks = rv1126_pin_banks,
+ .nr_banks = ARRAY_SIZE(rv1126_pin_banks),
+ .label = "RV1126-GPIO",
+ .type = RV1126,
+ .grf_mux_offset = 0x10004, /* mux offset from GPIO0_D0 */
+ .pmu_mux_offset = 0x0,
+ .iomux_routes = rv1126_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rv1126_mux_route_data),
+ .iomux_recalced = rv1126_mux_recalced_data,
+ .niomux_recalced = ARRAY_SIZE(rv1126_mux_recalced_data),
+ .pull_calc_reg = rv1126_calc_pull_reg_and_bit,
+ .drv_calc_reg = rv1126_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rv1126_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk2928_pin_banks[] = {
PIN_BANK(0, 32, "gpio0"),
PIN_BANK(1, 32, "gpio1"),
@@ -3568,6 +3898,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &px30_pin_ctrl },
{ .compatible = "rockchip,rv1108-pinctrl",
.data = &rv1108_pin_ctrl },
+ { .compatible = "rockchip,rv1126-pinctrl",
+ .data = &rv1126_pin_ctrl },
{ .compatible = "rockchip,rk2928-pinctrl",
.data = &rk2928_pin_ctrl },
{ .compatible = "rockchip,rk3036-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index ec46f8815ac9..4759f336941e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -186,6 +186,7 @@
enum rockchip_pinctrl_type {
PX30,
RV1108,
+ RV1126,
RK2928,
RK3066B,
RK3128,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 0fea71fd9a00..cf7f9cbe6044 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -12,7 +12,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h> /* of_get_named_gpio() */
#include <linux/of_address.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>
@@ -1162,6 +1161,31 @@ static void st_parse_syscfgs(struct st_pinctrl *info, int bank,
return;
}
+static int st_pctl_dt_calculate_pin(struct st_pinctrl *info,
+ phandle bank, unsigned int offset)
+{
+ struct device_node *np;
+ struct gpio_chip *chip;
+ int retval = -EINVAL;
+ int i;
+
+ np = of_find_node_by_phandle(bank);
+ if (!np)
+ return -EINVAL;
+
+ for (i = 0; i < info->nbanks; i++) {
+ chip = &info->banks[i].gpio_chip;
+ if (chip->of_node == np) {
+ if (offset < chip->ngpio)
+ retval = chip->base + offset;
+ break;
+ }
+ }
+
+ of_node_put(np);
+ return retval;
+}
+
/*
* Each pin is represented in one of the forms below.
* <bank offset mux direction rt_type rt_delay rt_clk>
@@ -1175,6 +1199,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
struct device *dev = info->dev;
struct st_pinconf *conf;
struct device_node *pins;
+ phandle bank;
+ unsigned int offset;
int i = 0, npins = 0, nr_props, ret = 0;
pins = of_get_child_by_name(np, "st,pins");
@@ -1214,9 +1240,9 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
conf = &grp->pin_conf[i];
/* bank & offset */
- be32_to_cpup(list++);
- be32_to_cpup(list++);
- conf->pin = of_get_named_gpio(pins, pp->name, 0);
+ bank = be32_to_cpup(list++);
+ offset = be32_to_cpup(list++);
+ conf->pin = st_pctl_dt_calculate_pin(info, bank, offset);
conf->name = pp->name;
grp->pins[i] = conf->pin;
/* mux */
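With of_get_named_gpio() gone, the pin number is computed purely from the first two cells of each st,pins entry via st_pctl_dt_calculate_pin(). A worked example with an illustrative bank:

	/*
	 * Worked example (illustrative bank): for an st,pins entry
	 * <&pio5 3 ALT1 OUT ...>, the phandle resolves to a bank whose
	 * gpio_chip has base == 40 and ngpio == 8, so
	 *
	 *	conf->pin = chip->base + offset = 40 + 3 = 43
	 *
	 * while an offset >= ngpio leaves the -EINVAL default in place.
	 */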
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index f415c13caae0..9dc2d803a586 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -15,6 +15,7 @@ config PINCTRL_MSM
config PINCTRL_APQ8064
tristate "Qualcomm APQ8064 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -23,6 +24,7 @@ config PINCTRL_APQ8064
config PINCTRL_APQ8084
tristate "Qualcomm APQ8084 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -31,6 +33,7 @@ config PINCTRL_APQ8084
config PINCTRL_IPQ4019
tristate "Qualcomm IPQ4019 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -39,6 +42,7 @@ config PINCTRL_IPQ4019
config PINCTRL_IPQ8064
tristate "Qualcomm IPQ8064 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -47,6 +51,7 @@ config PINCTRL_IPQ8064
config PINCTRL_IPQ8074
tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -57,6 +62,7 @@ config PINCTRL_IPQ8074
config PINCTRL_IPQ6018
tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -67,6 +73,7 @@ config PINCTRL_IPQ6018
config PINCTRL_MSM8226
tristate "Qualcomm 8226 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -76,6 +83,7 @@ config PINCTRL_MSM8226
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -84,6 +92,7 @@ config PINCTRL_MSM8660
config PINCTRL_MSM8960
tristate "Qualcomm 8960 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -100,6 +109,7 @@ config PINCTRL_MDM9607
config PINCTRL_MDM9615
tristate "Qualcomm 9615 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -108,6 +118,7 @@ config PINCTRL_MDM9615
config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -116,6 +127,7 @@ config PINCTRL_MSM8X74
config PINCTRL_MSM8909
tristate "Qualcomm 8909 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -132,6 +144,7 @@ config PINCTRL_MSM8916
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -142,6 +155,7 @@ config PINCTRL_MSM8953
config PINCTRL_MSM8976
tristate "Qualcomm 8976 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -152,6 +166,7 @@ config PINCTRL_MSM8976
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -161,6 +176,7 @@ config PINCTRL_MSM8994
config PINCTRL_MSM8996
tristate "Qualcomm MSM8996 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -169,6 +185,7 @@ config PINCTRL_MSM8996
config PINCTRL_MSM8998
tristate "Qualcomm MSM8998 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -177,6 +194,7 @@ config PINCTRL_MSM8998
config PINCTRL_QCM2290
tristate "Qualcomm QCM2290 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -185,6 +203,7 @@ config PINCTRL_QCM2290
config PINCTRL_QCS404
tristate "Qualcomm QCS404 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -232,6 +251,7 @@ config PINCTRL_QCOM_SSBI_PMIC
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -241,6 +261,7 @@ config PINCTRL_SC7180
config PINCTRL_SC7280
tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -250,6 +271,7 @@ config PINCTRL_SC7280
config PINCTRL_SC7280_LPASS_LPI
tristate "Qualcomm Technologies Inc SC7280 LPASS LPI pin controller driver"
depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -259,6 +281,7 @@ config PINCTRL_SC7280_LPASS_LPI
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
depends on (OF || ACPI)
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -268,6 +291,7 @@ config PINCTRL_SC8180X
config PINCTRL_SC8280XP
tristate "Qualcomm Technologies Inc SC8280xp pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -277,6 +301,7 @@ config PINCTRL_SC8280XP
config PINCTRL_SDM660
tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -286,6 +311,7 @@ config PINCTRL_SDM660
config PINCTRL_SDM845
tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
depends on (OF || ACPI)
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -295,6 +321,7 @@ config PINCTRL_SDM845
config PINCTRL_SDX55
tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -304,6 +331,7 @@ config PINCTRL_SDX55
config PINCTRL_SM6115
tristate "Qualcomm Technologies Inc SM6115,SM4250 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -313,6 +341,7 @@ config PINCTRL_SM6115
config PINCTRL_SM6125
tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -322,6 +351,7 @@ config PINCTRL_SM6125
config PINCTRL_SM6350
tristate "Qualcomm Technologies Inc SM6350 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -331,6 +361,7 @@ config PINCTRL_SM6350
config PINCTRL_SM6375
tristate "Qualcomm Technologies Inc SM6375 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -340,6 +371,7 @@ config PINCTRL_SM6375
config PINCTRL_SDX65
tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -349,6 +381,7 @@ config PINCTRL_SDX65
config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -358,6 +391,7 @@ config PINCTRL_SM8150
config PINCTRL_SM8250
tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -367,6 +401,7 @@ config PINCTRL_SM8250
config PINCTRL_SM8250_LPASS_LPI
tristate "Qualcomm Technologies Inc SM8250 LPASS LPI pin controller driver"
depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -375,6 +410,7 @@ config PINCTRL_SM8250_LPASS_LPI
config PINCTRL_SM8350
tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -384,12 +420,33 @@ config PINCTRL_SM8350
config PINCTRL_SM8450
tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8450 platform.
+config PINCTRL_SM8450_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SM8450 LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SM8450 platform.
+
+config PINCTRL_SC8280XP_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SC8280XP LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SC8280XP platform.
+
config PINCTRL_LPASS_LPI
tristate "Qualcomm Technologies Inc LPASS LPI pin controller driver"
select PINMUX
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index fbd64853a24d..8269a1db8794 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -45,4 +45,6 @@ obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
+obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o
+obj-$(CONFIG_PINCTRL_SC8280XP_LPASS_LPI) += pinctrl-sc8280xp-lpass-lpi.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
new file mode 100644
index 000000000000..4b9c0beac32e
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_dmic4_clk,
+ LPI_MUX_dmic4_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_i2s3_clk,
+ LPI_MUX_i2s3_data,
+ LPI_MUX_i2s3_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_wsa2_swr_clk,
+ LPI_MUX_wsa2_swr_data,
+ LPI_MUX_ext_mclk1_a,
+ LPI_MUX_ext_mclk1_b,
+ LPI_MUX_ext_mclk1_c,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+static int gpio14_pins[] = { 14 };
+static int gpio15_pins[] = { 15 };
+static int gpio16_pins[] = { 16 };
+static int gpio17_pins[] = { 17 };
+static int gpio18_pins[] = { 18 };
+
+static const struct pinctrl_pin_desc sc8280xp_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+};
+
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const dmic4_clk_groups[] = { "gpio17" };
+static const char * const dmic4_data_groups[] = { "gpio18" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const wsa2_swr_clk_groups[] = { "gpio15" };
+static const char * const wsa2_swr_data_groups[] = { "gpio16" };
+static const char * const i2s2_data_groups[] = { "gpio15", "gpio16" };
+static const char * const i2s3_clk_groups[] = { "gpio12" };
+static const char * const i2s3_ws_groups[] = { "gpio13" };
+static const char * const i2s3_data_groups[] = { "gpio17", "gpio18" };
+static const char * const ext_mclk1_c_groups[] = { "gpio5" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_a_groups[] = { "gpio13" };
+
+static const struct lpi_pingroup sc8280xp_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_rx_data, ext_mclk1_c, qua_mi2s_data, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, ext_mclk1_b, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s3_clk, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s3_ws, ext_mclk1_a, _),
+ LPI_PINGROUP(14, 6, swr_tx_data, _, _, _),
+ LPI_PINGROUP(15, 20, i2s2_data, wsa2_swr_clk, _, _),
+ LPI_PINGROUP(16, 22, i2s2_data, wsa2_swr_data, _, _),
+ LPI_PINGROUP(17, LPI_NO_SLEW, dmic4_clk, i2s3_data, _, _),
+ LPI_PINGROUP(18, LPI_NO_SLEW, dmic4_data, i2s3_data, _, _),
+};
+
+static const struct lpi_function sc8280xp_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(dmic4_clk),
+ LPI_FUNCTION(dmic4_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(i2s3_clk),
+ LPI_FUNCTION(i2s3_data),
+ LPI_FUNCTION(i2s3_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+ LPI_FUNCTION(wsa2_swr_clk),
+ LPI_FUNCTION(wsa2_swr_data),
+ LPI_FUNCTION(ext_mclk1_a),
+ LPI_FUNCTION(ext_mclk1_b),
+ LPI_FUNCTION(ext_mclk1_c),
+};
+
+static const struct lpi_pinctrl_variant_data sc8280xp_lpi_data = {
+ .pins = sc8280xp_lpi_pins,
+ .npins = ARRAY_SIZE(sc8280xp_lpi_pins),
+ .groups = sc8280xp_groups,
+ .ngroups = ARRAY_SIZE(sc8280xp_groups),
+ .functions = sc8280xp_functions,
+ .nfunctions = ARRAY_SIZE(sc8280xp_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sc8280xp-lpass-lpi-pinctrl",
+ .data = &sc8280xp_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sc8280xp-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SC8280XP LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
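Both new LPI drivers are thin per-SoC variant tables over the shared pinctrl-lpass-lpi core; the probe presumably resolves the variant through the OF match data, along these lines (assumed core behaviour, not shown in this diff):

	/* Sketch of how a shared probe can pick up per-SoC variant data. */
	static int lpi_pinctrl_probe_sketch(struct platform_device *pdev)
	{
		const struct lpi_pinctrl_variant_data *data;

		data = of_device_get_match_data(&pdev->dev);
		if (!data)
			return -EINVAL;
		/* ... register data->pins/groups/functions with pinctrl ... */
		return 0;
	}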
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
new file mode 100644
index 000000000000..c3c8c34148f1
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_dmic4_clk,
+ LPI_MUX_dmic4_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_i2s3_clk,
+ LPI_MUX_i2s3_data,
+ LPI_MUX_i2s3_ws,
+ LPI_MUX_i2s4_clk,
+ LPI_MUX_i2s4_data,
+ LPI_MUX_i2s4_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_wsa2_swr_clk,
+ LPI_MUX_wsa2_swr_data,
+ LPI_MUX_slimbus_clk,
+ LPI_MUX_slimbus_data,
+ LPI_MUX_ext_mclk1_a,
+ LPI_MUX_ext_mclk1_b,
+ LPI_MUX_ext_mclk1_c,
+ LPI_MUX_ext_mclk1_d,
+ LPI_MUX_ext_mclk1_e,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+static int gpio14_pins[] = { 14 };
+static int gpio15_pins[] = { 15 };
+static int gpio16_pins[] = { 16 };
+static int gpio17_pins[] = { 17 };
+static int gpio18_pins[] = { 18 };
+static int gpio19_pins[] = { 19 };
+static int gpio20_pins[] = { 20 };
+static int gpio21_pins[] = { 21 };
+static int gpio22_pins[] = { 22 };
+
+static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+};
+
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const dmic4_clk_groups[] = { "gpio17" };
+static const char * const dmic4_data_groups[] = { "gpio18" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const wsa2_swr_clk_groups[] = { "gpio15" };
+static const char * const wsa2_swr_data_groups[] = { "gpio16" };
+static const char * const i2s2_data_groups[] = { "gpio15", "gpio16" };
+static const char * const i2s4_ws_groups[] = { "gpio13" };
+static const char * const i2s4_clk_groups[] = { "gpio12" };
+static const char * const i2s4_data_groups[] = { "gpio17", "gpio18" };
+static const char * const slimbus_clk_groups[] = { "gpio19" };
+static const char * const i2s3_clk_groups[] = { "gpio19" };
+static const char * const i2s3_ws_groups[] = { "gpio20" };
+static const char * const i2s3_data_groups[] = { "gpio21", "gpio22" };
+static const char * const slimbus_data_groups[] = { "gpio20" };
+static const char * const ext_mclk1_c_groups[] = { "gpio5" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_a_groups[] = { "gpio13" };
+static const char * const ext_mclk1_d_groups[] = { "gpio14" };
+static const char * const ext_mclk1_e_groups[] = { "gpio22" };
+
+static const struct lpi_pingroup sm8450_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_rx_data, ext_mclk1_c, qua_mi2s_data, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, ext_mclk1_b, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s4_clk, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s4_ws, ext_mclk1_a, _),
+ LPI_PINGROUP(14, 6, swr_tx_data, ext_mclk1_d, _, _),
+ LPI_PINGROUP(15, 20, i2s2_data, wsa2_swr_clk, _, _),
+ LPI_PINGROUP(16, 22, i2s2_data, wsa2_swr_data, _, _),
+ LPI_PINGROUP(17, LPI_NO_SLEW, dmic4_clk, i2s4_data, _, _),
+ LPI_PINGROUP(18, LPI_NO_SLEW, dmic4_data, i2s4_data, _, _),
+ LPI_PINGROUP(19, LPI_NO_SLEW, i2s3_clk, slimbus_clk, _, _),
+ LPI_PINGROUP(20, LPI_NO_SLEW, i2s3_ws, slimbus_data, _, _),
+ LPI_PINGROUP(21, LPI_NO_SLEW, i2s3_data, _, _, _),
+ LPI_PINGROUP(22, LPI_NO_SLEW, i2s3_data, ext_mclk1_e, _, _),
+};
+
+static const struct lpi_function sm8450_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(dmic4_clk),
+ LPI_FUNCTION(dmic4_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(i2s3_clk),
+ LPI_FUNCTION(i2s3_data),
+ LPI_FUNCTION(i2s3_ws),
+ LPI_FUNCTION(i2s4_clk),
+ LPI_FUNCTION(i2s4_data),
+ LPI_FUNCTION(i2s4_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(slimbus_clk),
+ LPI_FUNCTION(slimbus_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+ LPI_FUNCTION(wsa2_swr_clk),
+ LPI_FUNCTION(wsa2_swr_data),
+ LPI_FUNCTION(ext_mclk1_a),
+ LPI_FUNCTION(ext_mclk1_b),
+ LPI_FUNCTION(ext_mclk1_c),
+ LPI_FUNCTION(ext_mclk1_d),
+ LPI_FUNCTION(ext_mclk1_e),
+};
+
+static const struct lpi_pinctrl_variant_data sm8450_lpi_data = {
+ .pins = sm8450_lpi_pins,
+ .npins = ARRAY_SIZE(sm8450_lpi_pins),
+ .groups = sm8450_groups,
+ .ngroups = ARRAY_SIZE(sm8450_groups),
+ .functions = sm8450_functions,
+ .nfunctions = ARRAY_SIZE(sm8450_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sm8450-lpass-lpi-pinctrl",
+ .data = &sm8450_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sm8450-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SM8450 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index ccaf40a9c0e6..8c31a8f6b7e4 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/gpio/driver.h>
@@ -36,6 +37,8 @@
#define PMIC_GPIO_SUBTYPE_GPIOC_8CH 0xd
#define PMIC_GPIO_SUBTYPE_GPIO_LV 0x10
#define PMIC_GPIO_SUBTYPE_GPIO_MV 0x11
+#define PMIC_GPIO_SUBTYPE_GPIO_LV_VIN2 0x12
+#define PMIC_GPIO_SUBTYPE_GPIO_MV_VIN3 0x13
#define PMIC_MPP_REG_RT_STS 0x10
#define PMIC_MPP_REG_RT_STS_VAL_MASK 0x1
@@ -98,6 +101,9 @@
#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS 1
#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS 2
+#define PMIC_GPIO_OUT_STRENGTH_LOW 1
+#define PMIC_GPIO_OUT_STRENGTH_HIGH 3
+
/* PMIC_GPIO_REG_EN_CTL */
#define PMIC_GPIO_REG_MASTER_EN_SHIFT 7
@@ -171,7 +177,6 @@ struct pmic_gpio_state {
struct regmap *map;
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
- struct irq_chip irq;
u8 usid;
u8 pid_base;
};
@@ -437,7 +442,17 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
arg = pad->pullup;
break;
case PMIC_GPIO_CONF_STRENGTH:
- arg = pad->strength;
+ switch (pad->strength) {
+ case PMIC_GPIO_OUT_STRENGTH_HIGH:
+ arg = PMIC_GPIO_STRENGTH_HIGH;
+ break;
+ case PMIC_GPIO_OUT_STRENGTH_LOW:
+ arg = PMIC_GPIO_STRENGTH_LOW;
+ break;
+ default:
+ arg = pad->strength;
+ break;
+ }
break;
case PMIC_GPIO_CONF_ATEST:
arg = pad->atest;
@@ -524,7 +539,17 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PMIC_GPIO_CONF_STRENGTH:
if (arg > PMIC_GPIO_STRENGTH_LOW)
return -EINVAL;
- pad->strength = arg;
+ switch (arg) {
+ case PMIC_GPIO_STRENGTH_HIGH:
+ pad->strength = PMIC_GPIO_OUT_STRENGTH_HIGH;
+ break;
+ case PMIC_GPIO_STRENGTH_LOW:
+ pad->strength = PMIC_GPIO_OUT_STRENGTH_LOW;
+ break;
+ default:
+ pad->strength = arg;
+ break;
+ }
break;
case PMIC_GPIO_CONF_ATEST:
if (!pad->lv_mv_type || arg > 4)
@@ -823,6 +848,16 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
pad->have_buffer = true;
pad->lv_mv_type = true;
break;
+ case PMIC_GPIO_SUBTYPE_GPIO_LV_VIN2:
+ pad->num_sources = 2;
+ pad->have_buffer = true;
+ pad->lv_mv_type = true;
+ break;
+ case PMIC_GPIO_SUBTYPE_GPIO_MV_VIN3:
+ pad->num_sources = 3;
+ pad->have_buffer = true;
+ pad->lv_mv_type = true;
+ break;
default:
dev_err(state->dev, "unknown GPIO type 0x%x\n", subtype);
return -ENODEV;
@@ -985,6 +1020,33 @@ static int pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
return 0;
}
+static void pmic_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ irq_chip_mask_parent(data);
+ gpiochip_disable_irq(gc, data->hwirq);
+}
+
+static void pmic_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ gpiochip_enable_irq(gc, data->hwirq);
+ irq_chip_unmask_parent(data);
+}
+
+static const struct irq_chip spmi_gpio_irq_chip = {
+ .name = "spmi-gpio",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pmic_gpio_irq_mask,
+ .irq_unmask = pmic_gpio_irq_unmask,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = irq_chip_set_wake_parent,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int pmic_gpio_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain;
@@ -1078,16 +1140,8 @@ static int pmic_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
- state->irq.name = "spmi-gpio",
- state->irq.irq_ack = irq_chip_ack_parent,
- state->irq.irq_mask = irq_chip_mask_parent,
- state->irq.irq_unmask = irq_chip_unmask_parent,
- state->irq.irq_set_type = irq_chip_set_type_parent,
- state->irq.irq_set_wake = irq_chip_set_wake_parent,
- state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
-
girq = &state->chip.irq;
- girq->chip = &state->irq;
+ gpio_irq_chip_set_chip(girq, &spmi_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
@@ -1147,6 +1201,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm6350-gpio", .data = (void *) 9 },
+ { .compatible = "qcom,pm7250b-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 4837bceb767b..bd13b5ef246d 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1166,15 +1166,15 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (ret)
goto err_put_banks;
- ret = samsung_gpiolib_register(pdev, drvdata);
- if (ret)
- goto err_unregister;
-
if (ctrl->eint_gpio_init)
ctrl->eint_gpio_init(drvdata);
if (ctrl->eint_wkup_init)
ctrl->eint_wkup_init(drvdata);
+ ret = samsung_gpiolib_register(pdev, drvdata);
+ if (ret)
+ goto err_unregister;
+
platform_set_drvdata(pdev, drvdata);
return 0;
diff --git a/drivers/pinctrl/starfive/Kconfig b/drivers/pinctrl/starfive/Kconfig
new file mode 100644
index 000000000000..55c514e622f9
--- /dev/null
+++ b/drivers/pinctrl/starfive/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config PINCTRL_STARFIVE_JH7100
+ tristate "Pinctrl and GPIO driver for the StarFive JH7100 SoC"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ depends on OF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select OF_GPIO
+ default SOC_STARFIVE
+ help
+ Say yes here to support pin control on the StarFive JH7100 SoC.
+ This also provides an interface to the GPIO pins not used by other
+ peripherals, supporting inputs, outputs, pull-up/pull-down
+ configuration, and interrupts on input changes.
diff --git a/drivers/pinctrl/starfive/Makefile b/drivers/pinctrl/starfive/Makefile
new file mode 100644
index 000000000000..0293f26a0a99
--- /dev/null
+++ b/drivers/pinctrl/starfive/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PINCTRL_STARFIVE_JH7100) += pinctrl-starfive-jh7100.o
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index 3eb40e230d98..5b544fb7f3d8 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -20,12 +20,12 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
-#include <dt-bindings/pinctrl/pinctrl-starfive.h>
+#include <dt-bindings/pinctrl/pinctrl-starfive-jh7100.h>
-#include "core.h"
-#include "pinctrl-utils.h"
-#include "pinmux.h"
-#include "pinconf.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+#include "../pinconf.h"
#define DRIVER_NAME "pinctrl-starfive"
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 14bcca73238a..e485506ea599 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1603,10 +1603,9 @@ int stm32_pctl_probe(struct platform_device *pdev)
bank->clk = of_clk_get_by_name(np, NULL);
if (IS_ERR(bank->clk)) {
- if (PTR_ERR(bank->clk) != -EPROBE_DEFER)
- dev_err(dev, "failed to get clk (%ld)\n", PTR_ERR(bank->clk));
fwnode_handle_put(child);
- return PTR_ERR(bank->clk);
+ return dev_err_probe(dev, PTR_ERR(bank->clk),
+ "failed to get clk\n");
}
i++;
}
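This hunk and the sun50i-h5 one below follow the same conversion: dev_err_probe() swallows the -EPROBE_DEFER noise, records the deferral reason for the devices_deferred debugfs file, and returns the error in a single statement. The canonical shape (illustrative resource):

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get clk\n");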
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index 31d62bbb7f43..96a350e70668 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -551,12 +551,9 @@ static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
int ret;
ret = platform_irq_count(pdev);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't determine irq count: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't determine irq count\n");
switch (ret) {
case 2:
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index b437847b6237..dbd327712205 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -3,6 +3,8 @@ if MIPS
source "drivers/platform/mips/Kconfig"
endif
+source "drivers/platform/loongarch/Kconfig"
+
source "drivers/platform/goldfish/Kconfig"
source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 4de08ef4ec9d..41640172975a 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_LOONGARCH) += loongarch/
obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/
obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC_EC) += olpc/
diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig
new file mode 100644
index 000000000000..5633e4d73991
--- /dev/null
+++ b/drivers/platform/loongarch/Kconfig
@@ -0,0 +1,31 @@
+#
+# LoongArch Platform Specific Drivers
+#
+
+menuconfig LOONGARCH_PLATFORM_DEVICES
+ bool "LoongArch Platform Specific Device Drivers"
+ default y
+ depends on LOONGARCH
+ help
+ Say Y here to get to see options for device drivers of various
+ LoongArch platforms, including vendor-specific laptop/desktop
+ extension and hardware monitor drivers. This option itself does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if LOONGARCH_PLATFORM_DEVICES
+
+config LOONGSON_LAPTOP
+ tristate "Generic Loongson-3 Laptop Driver"
+ depends on ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ depends on MACH_LOONGSON64
+ select ACPI_VIDEO
+ select INPUT_SPARSEKMAP
+ default y
+ help
+ Generic driver for ACPI-based Loongson-3 family laptops.
+
+endif # LOONGARCH_PLATFORM_DEVICES
diff --git a/drivers/platform/loongarch/Makefile b/drivers/platform/loongarch/Makefile
new file mode 100644
index 000000000000..f43ab03db1a2
--- /dev/null
+++ b/drivers/platform/loongarch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LOONGSON_LAPTOP) += loongson-laptop.o
diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c
new file mode 100644
index 000000000000..f0166ad5d2c2
--- /dev/null
+++ b/drivers/platform/loongarch/loongson-laptop.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Loongson processor-based LAPTOP/ALL-IN-ONE driver
+ *
+ * Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <acpi/video.h>
+
+/* 1. Driver-wide structs and misc. variables */
+
+/* ACPI HIDs */
+#define LOONGSON_ACPI_EC_HID "PNP0C09"
+#define LOONGSON_ACPI_HKEY_HID "LOON0000"
+
+#define ACPI_LAPTOP_NAME "loongson-laptop"
+#define ACPI_LAPTOP_ACPI_EVENT_PREFIX "loongson"
+
+#define MAX_ACPI_ARGS 3
+#define GENERIC_HOTKEY_MAP_MAX 64
+
+#define GENERIC_EVENT_TYPE_OFF 12
+#define GENERIC_EVENT_TYPE_MASK 0xF000
+#define GENERIC_EVENT_CODE_MASK 0x0FFF
+
+struct generic_sub_driver {
+ u32 type;
+ char *name;
+ acpi_handle *handle;
+ struct acpi_device *device;
+ struct platform_driver *driver;
+ int (*init)(struct generic_sub_driver *sub_driver);
+ void (*notify)(struct generic_sub_driver *sub_driver, u32 event);
+ u8 acpi_notify_installed;
+};
+
+static u32 input_device_registered;
+static struct input_dev *generic_inputdev;
+
+static acpi_handle hotkey_handle;
+static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX];
+
+int loongson_laptop_turn_on_backlight(void);
+int loongson_laptop_turn_off_backlight(void);
+static int loongson_laptop_backlight_update(struct backlight_device *bd);
+
+/* 2. ACPI Helpers and device model */
+
+static int acpi_evalf(acpi_handle handle, int *res, char *method, char *fmt, ...)
+{
+ char res_type;
+ char *fmt0 = fmt;
+ va_list ap;
+ int success, quiet;
+ acpi_status status;
+ struct acpi_object_list params;
+ struct acpi_buffer result, *resultp;
+ union acpi_object in_objs[MAX_ACPI_ARGS], out_obj;
+
+ if (!*fmt) {
+ pr_err("acpi_evalf() called with empty format\n");
+ return 0;
+ }
+
+ if (*fmt == 'q') {
+ quiet = 1;
+ fmt++;
+ } else
+ quiet = 0;
+
+ res_type = *(fmt++);
+
+ params.count = 0;
+ params.pointer = &in_objs[0];
+
+ va_start(ap, fmt);
+ while (*fmt) {
+ char c = *(fmt++);
+ switch (c) {
+ case 'd': /* int */
+ in_objs[params.count].integer.value = va_arg(ap, int);
+ in_objs[params.count++].type = ACPI_TYPE_INTEGER;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n", c);
+ va_end(ap);
+ return 0;
+ }
+ }
+ va_end(ap);
+
+ if (res_type != 'v') {
+ result.length = sizeof(out_obj);
+ result.pointer = &out_obj;
+ resultp = &result;
+ } else
+ resultp = NULL;
+
+ status = acpi_evaluate_object(handle, method, &params, resultp);
+
+ switch (res_type) {
+ case 'd': /* int */
+ success = (status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER);
+ if (success && res)
+ *res = out_obj.integer.value;
+ break;
+ case 'v': /* void */
+ success = status == AE_OK;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n", res_type);
+ return 0;
+ }
+
+ if (!success && !quiet)
+ pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
+ method, fmt0, acpi_format_exception(status));
+
+ return success;
+}
+
+static int hotkey_status_get(int *status)
+{
+ if (!acpi_evalf(hotkey_handle, status, "GSWS", "d"))
+ return -EIO;
+
+ return 0;
+}
+
+static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct generic_sub_driver *sub_driver = data;
+
+ if (!sub_driver || !sub_driver->notify)
+ return;
+ sub_driver->notify(sub_driver, event);
+}
+
+static int __init setup_acpi_notify(struct generic_sub_driver *sub_driver)
+{
+ acpi_status status;
+
+ if (!*sub_driver->handle)
+ return 0;
+
+ sub_driver->device = acpi_fetch_acpi_dev(*sub_driver->handle);
+ if (!sub_driver->device) {
+ pr_err("acpi_fetch_acpi_dev(%s) failed\n", sub_driver->name);
+ return -ENODEV;
+ }
+
+ sub_driver->device->driver_data = sub_driver;
+ sprintf(acpi_device_class(sub_driver->device), "%s/%s",
+ ACPI_LAPTOP_ACPI_EVENT_PREFIX, sub_driver->name);
+
+ status = acpi_install_notify_handler(*sub_driver->handle,
+ sub_driver->type, dispatch_acpi_notify, sub_driver);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_ALREADY_EXISTS) {
+ pr_notice("Another device driver is already "
+ "handling %s events\n", sub_driver->name);
+ } else {
+ pr_err("acpi_install_notify_handler(%s) failed: %s\n",
+ sub_driver->name, acpi_format_exception(status));
+ }
+ return -ENODEV;
+ }
+ sub_driver->acpi_notify_installed = 1;
+
+ return 0;
+}
+
+static int loongson_hotkey_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int loongson_hotkey_resume(struct device *dev)
+{
+ int status = 0;
+ struct key_entry ke;
+ struct backlight_device *bd;
+
+	/*
+	 * We can only handle the lid event if the firmware supports the
+	 * SW_LID event model. This accounts for development boards
+	 * without an EC.
+	 */
+ if (test_bit(SW_LID, generic_inputdev->swbit)) {
+ if (hotkey_status_get(&status) < 0)
+ return -EIO;
+		/*
+		 * The sw element of the input device records the last lid
+		 * status. When the system is woken by another wake-up
+		 * source, a lid event is reported as well; checking the
+		 * SW_LID bit in the sw element avoids acting on that case.
+		 *
+		 * The input core drops a lid event whose value matches the
+		 * last recorded lid status, so the driver does not report
+		 * repeated events.
+		 *
+		 * The lid status is normally 0, but hardware faults are
+		 * taken into account, so the lid status is confirmed here.
+		 */
+ if (test_bit(SW_LID, generic_inputdev->sw) && !(status & (1 << SW_LID))) {
+ ke.type = KE_SW;
+ ke.sw.value = (u8)status;
+ ke.sw.code = SW_LID;
+ sparse_keymap_report_entry(generic_inputdev, &ke, 1, true);
+ }
+ }
+
+ bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM);
+ if (bd) {
+ loongson_laptop_backlight_update(bd) ?
+ pr_warn("Loongson_backlight: resume brightness failed") :
+ pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness);
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(loongson_hotkey_pm,
+ loongson_hotkey_suspend, loongson_hotkey_resume);
+
+static int loongson_hotkey_probe(struct platform_device *pdev)
+{
+ hotkey_handle = ACPI_HANDLE(&pdev->dev);
+
+ if (!hotkey_handle)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct acpi_device_id loongson_device_ids[] = {
+ {LOONGSON_ACPI_HKEY_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, loongson_device_ids);
+
+static struct platform_driver loongson_hotkey_driver = {
+ .probe = loongson_hotkey_probe,
+ .driver = {
+ .name = "loongson-hotkey",
+ .owner = THIS_MODULE,
+ .pm = pm_ptr(&loongson_hotkey_pm),
+ .acpi_match_table = loongson_device_ids,
+ },
+};
+
+static int hotkey_map(void)
+{
+ u32 index;
+ acpi_status status;
+ struct acpi_buffer buf;
+ union acpi_object *pack;
+
+ buf.length = ACPI_ALLOCATE_BUFFER;
+ status = acpi_evaluate_object_typed(hotkey_handle, "KMAP", NULL, &buf, ACPI_TYPE_PACKAGE);
+ if (status != AE_OK) {
+ pr_err("ACPI exception: %s\n", acpi_format_exception(status));
+ return -1;
+ }
+ pack = buf.pointer;
+ for (index = 0; index < pack->package.count; index++) {
+ union acpi_object *element, *sub_pack;
+
+ sub_pack = &pack->package.elements[index];
+
+ element = &sub_pack->package.elements[0];
+ hotkey_keycode_map[index].type = element->integer.value;
+ element = &sub_pack->package.elements[1];
+ hotkey_keycode_map[index].code = element->integer.value;
+ element = &sub_pack->package.elements[2];
+ hotkey_keycode_map[index].keycode = element->integer.value;
+ }
+
+ return 0;
+}
+
+static int hotkey_backlight_set(bool enable)
+{
+ if (!acpi_evalf(hotkey_handle, NULL, "VCBL", "vd", enable ? 1 : 0))
+ return -EIO;
+
+ return 0;
+}
+
+static int ec_get_brightness(void)
+{
+ int status = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECBG", "d"))
+ return -EIO;
+
+ return status;
+}
+
+static int ec_set_brightness(int level)
+{
+ int ret = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, NULL, "ECBS", "vd", level))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int ec_backlight_level(u8 level)
+{
+ int status = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
+ return -EIO;
+
+ if ((status < 0) || (level > status))
+ return status;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECSL", "d"))
+ return -EIO;
+
+ if ((status < 0) || (level < status))
+ return status;
+
+ return level;
+}
+
+static int loongson_laptop_backlight_update(struct backlight_device *bd)
+{
+ int lvl = ec_backlight_level(bd->props.brightness);
+
+ if (lvl < 0)
+ return -EIO;
+ if (ec_set_brightness(lvl))
+ return -EIO;
+
+ return 0;
+}
+
+static int loongson_laptop_get_brightness(struct backlight_device *bd)
+{
+ int level;
+
+ level = ec_get_brightness();
+ if (level < 0)
+ return -EIO;
+
+ return level;
+}
+
+static const struct backlight_ops backlight_laptop_ops = {
+ .update_status = loongson_laptop_backlight_update,
+ .get_brightness = loongson_laptop_get_brightness,
+};
+
+static int laptop_backlight_register(void)
+{
+ int status = 0;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(props));
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
+ return -EIO;
+
+ props.brightness = 1;
+ props.max_brightness = status;
+ props.type = BACKLIGHT_PLATFORM;
+
+ backlight_device_register("loongson_laptop",
+ NULL, NULL, &backlight_laptop_ops, &props);
+
+ return 0;
+}
+
+int loongson_laptop_turn_on_backlight(void)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+ arg0.integer.value = 1;
+ status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_info("Loongson lvds error: 0x%x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int loongson_laptop_turn_off_backlight(void)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+ arg0.integer.value = 0;
+ status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_info("Loongson lvds error: 0x%x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init event_init(struct generic_sub_driver *sub_driver)
+{
+ int ret;
+
+ ret = hotkey_map();
+ if (ret < 0) {
+ pr_err("Failed to parse keymap from DSDT\n");
+ return ret;
+ }
+
+ ret = sparse_keymap_setup(generic_inputdev, hotkey_keycode_map, NULL);
+ if (ret < 0) {
+ pr_err("Failed to setup input device keymap\n");
+ input_free_device(generic_inputdev);
+
+ return ret;
+ }
+
+	/*
+	 * This hotkey driver handles backlight events only when
+	 * acpi_video_get_backlight_type() returns acpi_backlight_vendor.
+	 */
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor)
+ hotkey_backlight_set(true);
+ else
+ hotkey_backlight_set(false);
+
+ pr_info("ACPI: enabling firmware HKEY event interface...\n");
+
+ return ret;
+}
+
+static void event_notify(struct generic_sub_driver *sub_driver, u32 event)
+{
+ int type, scan_code;
+ struct key_entry *ke = NULL;
+
+ scan_code = event & GENERIC_EVENT_CODE_MASK;
+ type = (event & GENERIC_EVENT_TYPE_MASK) >> GENERIC_EVENT_TYPE_OFF;
+ ke = sparse_keymap_entry_from_scancode(generic_inputdev, scan_code);
+ if (ke) {
+ if (type == KE_SW) {
+ int status = 0;
+
+ if (hotkey_status_get(&status) < 0)
+ return;
+
+ ke->sw.value = !!(status & (1 << ke->sw.code));
+ }
+ sparse_keymap_report_entry(generic_inputdev, ke, 1, true);
+ }
+}
+
+/* 3. Infrastructure */
+
+static void generic_subdriver_exit(struct generic_sub_driver *sub_driver);
+
+static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver)
+{
+ int ret;
+
+ if (!sub_driver || !sub_driver->driver)
+ return -EINVAL;
+
+ ret = platform_driver_register(sub_driver->driver);
+ if (ret)
+ return -EINVAL;
+
+ if (sub_driver->init)
+ sub_driver->init(sub_driver);
+
+ if (sub_driver->notify) {
+ ret = setup_acpi_notify(sub_driver);
+ if (ret == -ENODEV) {
+ ret = 0;
+ goto err_out;
+ }
+ if (ret < 0)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ generic_subdriver_exit(sub_driver);
+ return (ret < 0) ? ret : 0;
+}
+
+static void generic_subdriver_exit(struct generic_sub_driver *sub_driver)
+{
+ if (sub_driver->acpi_notify_installed) {
+ acpi_remove_notify_handler(*sub_driver->handle,
+ sub_driver->type, dispatch_acpi_notify);
+ sub_driver->acpi_notify_installed = 0;
+ }
+ platform_driver_unregister(sub_driver->driver);
+}
+
+static struct generic_sub_driver generic_sub_drivers[] __refdata = {
+ {
+ .name = "hotkey",
+ .init = event_init,
+ .notify = event_notify,
+ .handle = &hotkey_handle,
+ .type = ACPI_DEVICE_NOTIFY,
+ .driver = &loongson_hotkey_driver,
+ },
+};
+
+static int __init generic_acpi_laptop_init(void)
+{
+ bool ec_found;
+ int i, ret, status;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* The EC device is required */
+ ec_found = acpi_dev_found(LOONGSON_ACPI_EC_HID);
+ if (!ec_found)
+ return -ENODEV;
+
+ /* Enable SCI for EC */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+
+ generic_inputdev = input_allocate_device();
+ if (!generic_inputdev) {
+ pr_err("Unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ /* Prepare input device, but don't register */
+ generic_inputdev->name =
+ "Loongson Generic Laptop/All-in-One Extra Buttons";
+ generic_inputdev->phys = ACPI_LAPTOP_NAME "/input0";
+ generic_inputdev->id.bustype = BUS_HOST;
+ generic_inputdev->dev.parent = NULL;
+
+ /* Init subdrivers */
+ for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++) {
+ ret = generic_subdriver_init(&generic_sub_drivers[i]);
+ if (ret < 0) {
+ input_free_device(generic_inputdev);
+ while (--i >= 0)
+ generic_subdriver_exit(&generic_sub_drivers[i]);
+ return ret;
+ }
+ }
+
+ ret = input_register_device(generic_inputdev);
+ if (ret < 0) {
+ input_free_device(generic_inputdev);
+ while (--i >= 0)
+ generic_subdriver_exit(&generic_sub_drivers[i]);
+ pr_err("Unable to register input device\n");
+ return ret;
+ }
+
+ input_device_registered = 1;
+
+ if (acpi_evalf(hotkey_handle, &status, "ECBG", "d")) {
+		pr_info("Loongson laptop detected, initial brightness is 0x%x\n", status);
+ ret = laptop_backlight_register();
+ if (ret < 0)
+ pr_err("Loongson Laptop: laptop-backlight device register failed\n");
+ }
+
+ return 0;
+}
+
+static void __exit generic_acpi_laptop_exit(void)
+{
+ if (generic_inputdev) {
+ if (input_device_registered)
+ input_unregister_device(generic_inputdev);
+ else
+ input_free_device(generic_inputdev);
+ }
+}
+
+module_init(generic_acpi_laptop_init);
+module_exit(generic_acpi_laptop_exit);
+
+MODULE_AUTHOR("Jianmin Lv <lvjianmin@loongson.cn>");
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Laptop/All-in-One ACPI Driver");
+MODULE_LICENSE("GPL");
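For reference, a short sketch of the format convention implemented by the driver's acpi_evalf() helper above; these format strings are private to this driver, and the method names below (ECBG, ECBS, ECLL) are the ones the file itself uses:

	int level;

	/* result type 'd': evaluate ECBG, expect an ACPI integer result */
	if (!acpi_evalf(hotkey_handle, &level, "ECBG", "d"))
		return -EIO;

	/* 'v': no result expected, followed by one integer argument 'd' */
	if (!acpi_evalf(hotkey_handle, NULL, "ECBS", "vd", level))
		return -EIO;

	/* a leading 'q' suppresses the error message on failure */
	acpi_evalf(hotkey_handle, &level, "ECLL", "qd");
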
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 617dbf98980e..97cfbc520a02 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -125,8 +125,7 @@ static irqreturn_t int0002_irq(int irq, void *data)
if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
return IRQ_NONE;
- generic_handle_irq(irq_find_mapping(chip->irq.domain,
- GPE0A_PME_B0_VIRT_GPIO_PIN));
+ generic_handle_domain_irq_safe(chip->irq.domain, GPE0A_PME_B0_VIRT_GPIO_PIN);
pm_wakeup_hard_event(chip->parent);
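Side by side, the pattern this hunk (and the ssb hunks further down) converts to; generic_handle_domain_irq_safe() performs the domain lookup itself and, unlike generic_handle_irq(), may be used from any context because it disables local interrupts around the handler. A sketch only; hwirq stands in for the hardware irq number:

	/* old: explicit mapping lookup, usable from hardirq context only */
	generic_handle_irq(irq_find_mapping(chip->irq.domain, hwirq));

	/* new: lookup and dispatch in one call, safe from any context */
	generic_handle_domain_irq_safe(chip->irq.domain, hwirq);
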
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index f83e9c393f31..5b8d1a9620a5 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -128,15 +128,15 @@ skl_int3472_fill_clk_pdata(struct device *dev, struct tps68470_clk_platform_data
for_each_acpi_consumer_dev(adev, consumer) {
sensor_name = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
acpi_dev_name(consumer));
- if (!sensor_name)
+ if (!sensor_name) {
+ acpi_dev_put(consumer);
return -ENOMEM;
+ }
(*clk_pdata)->consumers[i].consumer_dev_name = sensor_name;
i++;
}
- acpi_dev_put(consumer);
-
return n_consumers;
}
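The reason the put moves into the loop: for_each_acpi_consumer_dev() takes a reference on the current element and drops it when advancing, so nothing is left to release after normal termination; only an early exit still holds a reference. A sketch of the resulting pattern, with the allocation failure purely illustrative:

	struct acpi_device *consumer;
	const char *sensor_name;

	for_each_acpi_consumer_dev(adev, consumer) {
		sensor_name = devm_kasprintf(dev, GFP_KERNEL, "%s",
					     acpi_dev_name(consumer));
		if (!sensor_name) {
			/* early exit: drop the iterator's reference */
			acpi_dev_put(consumer);
			return -ENOMEM;
		}
	}
	/* normal termination: no reference left to hand back */
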
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index da78dc77aed3..4f05f610391b 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -206,7 +206,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
if (i >= 0) {
flags = acpi_dev_irq_flags(gpio->triggering,
gpio->polarity,
- gpio->shareable);
+ gpio->shareable,
+ gpio->wake_capable);
} else {
flags = IORESOURCE_DISABLED;
}
@@ -315,7 +316,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -339,7 +340,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
}
}
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index a20bf12f3ce3..999e218d7793 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -254,7 +254,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
iit = per_cpu_ptr(&idle_inject_thread, cpu);
iit->should_run = 0;
- wait_task_inactive(iit->tsk, 0);
+ wait_task_inactive(iit->tsk, TASK_ANY);
}
cpu_hotplug_enable();
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index d36c3f597f77..a48d9b7d2921 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -3657,6 +3657,7 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
struct device *dev = &bp->dev;
sysfs_remove_link(&dev->kobj, "ttyGNSS");
+ sysfs_remove_link(&dev->kobj, "ttyGNSS2");
sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps");
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 1beb596d1434..cb83f81da416 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features)
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
- strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+ strscpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
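The strlcpy() to strscpy() conversions in this series (here and throughout the s390 hunks below) rely on strscpy()'s safer contract: it never reads the source past the NUL or the destination size, returns the number of bytes copied, and returns -E2BIG on truncation while still NUL-terminating the destination; strlcpy() instead returns strlen(src), which forces a full read of the source. A minimal sketch:

	char busid[DASD_BUS_ID_SIZE];
	ssize_t len;

	len = strscpy(busid, bus_id, sizeof(busid));
	if (len == -E2BIG)
		/* truncated, but busid is still NUL-terminated */
		pr_warn("bus id truncated\n");
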
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 5ae64af9ccea..d4d31cd11d26 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strlcpy(header.busid, dev_name(&device->cdev->dev),
+ strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
@@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strlcpy(header.busid, dev_name(&device->cdev->dev),
+ strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5187705bd0f3..93b80da60277 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -614,7 +614,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = -ENAMETOOLONG;
goto seg_list_del;
}
- strlcpy(local_buf, buf, i + 1);
+ strscpy(local_buf, buf, i + 1);
dev_info->num_of_segments = num_of_segments;
rc = dcssblk_is_continuous(dev_info);
if (rc < 0)
diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c
index 1f5bdb237862..43df27ceec11 100644
--- a/drivers/s390/char/hmcdrv_cache.c
+++ b/drivers/s390/char/hmcdrv_cache.c
@@ -154,7 +154,7 @@ static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
/* cache some file info (FTP command, file name and file
* size) unconditionally
*/
- strlcpy(hmcdrv_cache_file.fname, ftp->fname,
+ strscpy(hmcdrv_cache_file.fname, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
hmcdrv_cache_file.id = ftp->id;
pr_debug("caching cmd %d, file size %zu for '%s'\n",
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index b58df0dd0039..c21dc68e05a0 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev(
if (!tcd)
return ERR_PTR(-ENOMEM);
- strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+ strscpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
- strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+ strscpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f6da215ccf9f..6165e6aae762 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -30,6 +30,7 @@
#include <asm/checksum.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
+#include <asm/maccess.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 4bb7965daa0f..1a9714af51e4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -87,7 +87,7 @@ int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
debug_unregister(irq_ptr->debug_area);
return -ENOMEM;
}
- strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ strscpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
new_entry->dbf_info = irq_ptr->debug_area;
mutex_lock(&qdio_dbf_list_mutex);
list_add(&new_entry->dbf_list, &qdio_dbf_list);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 86d9e428357b..7f5402fe857a 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mdev.h>
@@ -142,7 +141,6 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
- atomic_set(&private->avail, 1);
private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
GFP_KERNEL);
@@ -203,7 +201,6 @@ static void vfio_ccw_free_private(struct vfio_ccw_private *private)
mutex_destroy(&private->io_mutex);
kfree(private);
}
-
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
@@ -222,7 +219,12 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
- ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
+ private->mdev_type.sysfs_name = "io";
+ private->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
+ private->mdev_types[0] = &private->mdev_type;
+ ret = mdev_register_parent(&private->parent, &sch->dev,
+ &vfio_ccw_mdev_driver,
+ private->mdev_types, 1);
if (ret)
goto out_free;
@@ -241,7 +243,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- mdev_unregister_device(&sch->dev);
+ mdev_unregister_parent(&private->parent);
dev_set_drvdata(&sch->dev, NULL);
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 4a806a2273b5..6ae4d012d800 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -11,7 +11,6 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>
@@ -45,47 +44,14 @@ static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
vfio_ccw_mdev_reset(private);
}
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
-}
-static MDEV_TYPE_ATTR_RO(name);
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
-}
-static MDEV_TYPE_ATTR_RO(device_api);
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
+static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
- dev_get_drvdata(mtype_get_parent_dev(mtype));
+ container_of(vdev, struct vfio_ccw_private, vdev);
- return sprintf(buf, "%d\n", atomic_read(&private->avail));
+ init_completion(&private->release_comp);
+ return 0;
}
-static MDEV_TYPE_ATTR_RO(available_instances);
-
-static struct attribute *mdev_types_attrs[] = {
- &mdev_type_attr_name.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_available_instances.attr,
- NULL,
-};
-
-static struct attribute_group mdev_type_group = {
- .name = "io",
- .attrs = mdev_types_attrs,
-};
-
-static struct attribute_group *mdev_type_groups[] = {
- &mdev_type_group,
- NULL,
-};
static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
@@ -95,12 +61,9 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
if (private->state == VFIO_CCW_STATE_NOT_OPER)
return -ENODEV;
- if (atomic_dec_if_positive(&private->avail) < 0)
- return -EPERM;
-
- memset(&private->vdev, 0, sizeof(private->vdev));
- vfio_init_group_dev(&private->vdev, &mdev->dev,
- &vfio_ccw_dev_ops);
+ ret = vfio_init_device(&private->vdev, &mdev->dev, &vfio_ccw_dev_ops);
+ if (ret)
+ return ret;
VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
private->sch->schid.cssid,
@@ -109,16 +72,32 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
ret = vfio_register_emulated_iommu_dev(&private->vdev);
if (ret)
- goto err_atomic;
+ goto err_put_vdev;
dev_set_drvdata(&mdev->dev, private);
return 0;
-err_atomic:
- vfio_uninit_group_dev(&private->vdev);
- atomic_inc(&private->avail);
+err_put_vdev:
+ vfio_put_device(&private->vdev);
return ret;
}
+static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev)
+{
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
+
+	/*
+	 * We cannot free vfio_ccw_private here because it includes
+	 * parent info which must be freed by the css driver.
+	 *
+	 * As a workaround, memset the core device part and then
+	 * notify the remove path that all active references to this
+	 * device have been released.
+	 */
+ memset(vdev, 0, sizeof(*vdev));
+ complete(&private->release_comp);
+}
+
static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
@@ -130,8 +109,16 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
vfio_unregister_group_dev(&private->vdev);
- vfio_uninit_group_dev(&private->vdev);
- atomic_inc(&private->avail);
+ vfio_put_device(&private->vdev);
+	/*
+	 * Wait until all active references on the mdev have been
+	 * released, so it is safe to defer kfree() to a later point.
+	 *
+	 * TODO: the clean fix is to split the parent/mdev info from
+	 * the ccw private structure so each can be managed in its own
+	 * life cycle.
+	 */
+ wait_for_completion(&private->release_comp);
}
static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
@@ -592,6 +579,8 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
}
static const struct vfio_device_ops vfio_ccw_dev_ops = {
+ .init = vfio_ccw_mdev_init_dev,
+ .release = vfio_ccw_mdev_release_dev,
.open_device = vfio_ccw_mdev_open_device,
.close_device = vfio_ccw_mdev_close_device,
.read = vfio_ccw_mdev_read,
@@ -602,6 +591,8 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
};
struct mdev_driver vfio_ccw_mdev_driver = {
+ .device_api = VFIO_DEVICE_API_CCW_STRING,
+ .max_instances = 1,
.driver = {
.name = "vfio_ccw_mdev",
.owner = THIS_MODULE,
@@ -609,5 +600,4 @@ struct mdev_driver vfio_ccw_mdev_driver = {
},
.probe = vfio_ccw_mdev_probe,
.remove = vfio_ccw_mdev_remove,
- .supported_type_groups = mdev_type_groups,
};
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index cd24b7fada91..bd5fb81456af 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/vfio_ccw.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include <asm/crw.h>
#include <asm/debug.h>
@@ -72,7 +73,6 @@ struct vfio_ccw_crw {
* @sch: pointer to the subchannel
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
- * @avail: available for creating a mediated device
* @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
@@ -88,13 +88,14 @@ struct vfio_ccw_crw {
* @req_trigger: eventfd ctx for signaling userspace to return device
* @io_work: work for deferral process of I/O handling
* @crw_work: work for deferral process of CRW handling
+ * @release_comp: synchronization helper for vfio device release
+ * @parent: parent data structures for mdevs created
*/
struct vfio_ccw_private {
struct vfio_device vdev;
struct subchannel *sch;
int state;
struct completion *completion;
- atomic_t avail;
struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
@@ -113,6 +114,12 @@ struct vfio_ccw_private {
struct eventfd_ctx *req_trigger;
struct work_struct io_work;
struct work_struct crw_work;
+
+ struct completion release_comp;
+
+ struct mdev_parent parent;
+ struct mdev_type mdev_type;
+ struct mdev_type *mdev_types[1];
} __aligned(8);
int vfio_ccw_sch_quiesce(struct subchannel *sch);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index ee82207b4e60..0b4cc8c597ae 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -684,42 +684,41 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
AP_DOMAINS);
}
-static int vfio_ap_mdev_probe(struct mdev_device *mdev)
+static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
- struct ap_matrix_mdev *matrix_mdev;
- int ret;
-
- if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
- return -EPERM;
-
- matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
- if (!matrix_mdev) {
- ret = -ENOMEM;
- goto err_dec_available;
- }
- vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
- &vfio_ap_matrix_dev_ops);
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
- matrix_mdev->mdev = mdev;
+ matrix_mdev->mdev = to_mdev_device(vdev->dev);
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
matrix_mdev->pqap_hook = handle_pqap;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
hash_init(matrix_mdev->qtable.queues);
+ return 0;
+}
+
+static int vfio_ap_mdev_probe(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ int ret;
+
+ matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
+ &vfio_ap_matrix_dev_ops);
+ if (IS_ERR(matrix_mdev))
+ return PTR_ERR(matrix_mdev);
+
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
- goto err_list;
+ goto err_put_vdev;
dev_set_drvdata(&mdev->dev, matrix_mdev);
mutex_lock(&matrix_dev->mdevs_lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
-err_list:
- vfio_uninit_group_dev(&matrix_mdev->vdev);
- kfree(matrix_mdev);
-err_dec_available:
- atomic_inc(&matrix_dev->available_instances);
+err_put_vdev:
+ vfio_put_device(&matrix_mdev->vdev);
return ret;
}
@@ -766,6 +765,11 @@ static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
}
}
+static void vfio_ap_mdev_release_dev(struct vfio_device *vdev)
+{
+ vfio_free_device(vdev);
+}
+
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
@@ -779,54 +783,9 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_dev->guests_lock);
- vfio_uninit_group_dev(&matrix_mdev->vdev);
- kfree(matrix_mdev);
- atomic_inc(&matrix_dev->available_instances);
+ vfio_put_device(&matrix_mdev->vdev);
}
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
-}
-
-static MDEV_TYPE_ATTR_RO(name);
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n",
- atomic_read(&matrix_dev->available_instances));
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
-}
-
-static MDEV_TYPE_ATTR_RO(device_api);
-
-static struct attribute *vfio_ap_mdev_type_attrs[] = {
- &mdev_type_attr_name.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_available_instances.attr,
- NULL,
-};
-
-static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
- .name = VFIO_AP_MDEV_TYPE_HWVIRT,
- .attrs = vfio_ap_mdev_type_attrs,
-};
-
-static struct attribute_group *vfio_ap_mdev_type_groups[] = {
- &vfio_ap_mdev_hwvirt_type_group,
- NULL,
-};
-
#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
"already assigned to %s"
@@ -1824,6 +1783,8 @@ static const struct attribute_group vfio_queue_attr_group = {
};
static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
+ .init = vfio_ap_mdev_init_dev,
+ .release = vfio_ap_mdev_release_dev,
.open_device = vfio_ap_mdev_open_device,
.close_device = vfio_ap_mdev_close_device,
.ioctl = vfio_ap_mdev_ioctl,
@@ -1831,6 +1792,8 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
};
static struct mdev_driver vfio_ap_matrix_driver = {
+ .device_api = VFIO_DEVICE_API_AP_STRING,
+ .max_instances = MAX_ZDEV_ENTRIES_EXT,
.driver = {
.name = "vfio_ap_mdev",
.owner = THIS_MODULE,
@@ -1839,20 +1802,22 @@ static struct mdev_driver vfio_ap_matrix_driver = {
},
.probe = vfio_ap_mdev_probe,
.remove = vfio_ap_mdev_remove,
- .supported_type_groups = vfio_ap_mdev_type_groups,
};
int vfio_ap_mdev_register(void)
{
int ret;
- atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
-
ret = mdev_register_driver(&vfio_ap_matrix_driver);
if (ret)
return ret;
- ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver);
+ matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
+ matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
+ matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
+ ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
+ &vfio_ap_matrix_driver,
+ matrix_dev->mdev_types, 1);
if (ret)
goto err_driver;
return 0;
@@ -1864,7 +1829,7 @@ err_driver:
void vfio_ap_mdev_unregister(void)
{
- mdev_unregister_device(&matrix_dev->device);
+ mdev_unregister_parent(&matrix_dev->parent);
mdev_unregister_driver(&vfio_ap_matrix_driver);
}
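Both s390 conversions above follow the same new vfio core lifecycle: the device gains .init/.release callbacks, allocation goes through vfio_alloc_device() (or vfio_init_device() for an embedded structure, as in vfio_ccw), and vfio_put_device() drops the reference that eventually runs .release, replacing the old available_instances accounting now handled by mdev's max_instances. A condensed sketch of the vfio_ap variant:

	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	/* allocates the containing structure and runs .init */
	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
					&vfio_ap_matrix_dev_ops);
	if (IS_ERR(matrix_mdev))
		return PTR_ERR(matrix_mdev);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret) {
		/* drops the last reference, which ends in .release */
		vfio_put_device(&matrix_mdev->vdev);
		return ret;
	}
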
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index d782cf463eab..2eddd5f34ed3 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -13,7 +13,6 @@
#define _VFIO_AP_PRIVATE_H_
#include <linux/types.h>
-#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -30,7 +29,6 @@
* struct ap_matrix_dev - Contains the data for the matrix device.
*
* @device: generic device structure associated with the AP matrix device
- * @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
* @mdev_list: the list of mediated matrix devices created
* @mdevs_lock: mutex for locking the AP matrix device. This lock will be
@@ -47,12 +45,14 @@
*/
struct ap_matrix_dev {
struct device device;
- atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */
struct ap_driver *vfio_ap_drv;
struct mutex guests_lock; /* serializes access to each KVM guest */
+ struct mdev_parent parent;
+ struct mdev_type mdev_type;
+ struct mdev_type *mdev_types[];
};
extern struct ap_matrix_dev *matrix_dev;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index e0fdd54bfeb7..37b551bd43bf 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1566,7 +1566,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
goto out_dev;
}
- strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
+ strscpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n",
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 98c4864932d2..0ff61d00feb1 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -28,7 +28,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
return NULL;
}
- strlcpy(this->name, name, sizeof(this->name));
+ strscpy(this->name, name, sizeof(this->name));
init_waitqueue_head(&this->wait_q);
f = kzalloc(sizeof(fsm), order);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 9eba0a32e9f9..e250f49535fa 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -188,9 +188,9 @@ static void qeth_get_drvinfo(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
+ strscpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
- strlcpy(info->fw_version, card->info.mcl_level,
+ strscpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index fd2f1c31bd21..df782646e856 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -103,7 +103,7 @@ static void __init zfcp_init_device_setup(char *devstr)
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
- strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
+ strscpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index b61acbb09be3..77917b339870 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -885,7 +885,7 @@ static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
dev_name(&adapter->ccw_device->dev),
init_utsname()->nodename);
else
- strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ strscpy(fc_host_symbolic_name(adapter->scsi_host),
gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
return 0;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bd72c38d7bfc..f569cf0095c2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -998,8 +998,9 @@ void
spi_dv_device(struct scsi_device *sdev)
{
struct scsi_target *starget = sdev->sdev_target;
- u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ unsigned int sleep_flags;
+ u8 *buffer;
/*
* Because this function and the power management code both call
@@ -1007,7 +1008,7 @@ spi_dv_device(struct scsi_device *sdev)
* while suspend or resume is in progress. Hence the
* lock/unlock_system_sleep() calls.
*/
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
if (scsi_autopm_get_device(sdev))
goto unlock_system_sleep;
@@ -1058,7 +1059,7 @@ put_autopm:
scsi_autopm_put_device(sdev);
unlock_system_sleep:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 573f89eade3b..bc46721aa01c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -60,6 +60,9 @@
#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
+/* channel callback timeout in ms */
+#define CALLBACK_TIMEOUT 2
+
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
VSTOR_OPERATION_COMPLETE_IO = 1,
@@ -1204,6 +1207,7 @@ static void storvsc_on_channel_callback(void *context)
struct hv_device *device;
struct storvsc_device *stor_device;
struct Scsi_Host *shost;
+ unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1224,6 +1228,11 @@ static void storvsc_on_channel_callback(void *context)
u32 minlen = rqst_id ? sizeof(struct vstor_packet) :
sizeof(enum vstor_packet_operation);
+ if (unlikely(time_after(jiffies, time_limit))) {
+ hv_pkt_iter_close(channel);
+ return;
+ }
+
if (pktlen < minlen) {
dev_err(&device->device,
"Invalid pkt: id=%llu, len=%u, minlen=%u\n",
@@ -2059,7 +2068,7 @@ err_out3:
err_out2:
/*
* Once we have connected with the host, we would need to
- * to invoke storvsc_dev_remove() to rollback this state and
+ * invoke storvsc_dev_remove() to rollback this state and
* this call also frees up the stor_device; hence the jump around
* err_out1 label.
*/
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 2de3896489c8..897cb8db5084 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -132,7 +132,8 @@ static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
- generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ generic_handle_domain_irq_safe(bus->irq_domain, gpio);
+
ssb_chipco_gpio_polarity(chipco, irqs, val & irqs);
return IRQ_HANDLED;
@@ -330,7 +331,8 @@ static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
- generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ generic_handle_domain_irq_safe(bus->irq_domain, gpio);
+
ssb_extif_gpio_polarity(extif, irqs, val & irqs);
return IRQ_HANDLED;
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 28f87cd8b3ed..290b1bb0e9cd 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -492,15 +492,18 @@ static bool is_normal_memory(pgprot_t p)
#endif
}
-static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+static int __check_mem_type(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
- while (vma && is_normal_memory(vma->vm_page_prot)) {
- if (vma->vm_end >= end)
- return 0;
- vma = vma->vm_next;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, start);
+
+ for_each_vma_range(vmi, vma, end) {
+ if (!is_normal_memory(vma->vm_page_prot))
+ return -EINVAL;
}
- return -EINVAL;
+ return 0;
}
int optee_check_mem_type(unsigned long start, size_t num_pages)
@@ -516,8 +519,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
return 0;
mmap_read_lock(mm);
- rc = __check_mem_type(find_vma(mm, start),
- start + num_pages * PAGE_SIZE);
+ rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
mmap_read_unlock(mm);
return rc;
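Context for the optee change: with the mm's VMAs kept in a maple tree, the vm_next chain is gone and ranges are walked through a VMA iterator instead. A minimal sketch of the pattern, with mm, start and end as in the function above; note that the iterator visits only VMAs that actually intersect [start, end), so unmapped gaps are simply skipped:

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	for_each_vma_range(vmi, vma, end) {
		if (!is_normal_memory(vma->vm_page_prot))
			return -EINVAL;
	}
	return 0;
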
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index def8e1a0399c..2506c6c8ca83 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_DA9062_THERMAL) += da9062-thermal.o
obj-y += intel/
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-y += st/
-obj-$(CONFIG_QCOM_TSENS) += qcom/
+obj-y += qcom/
obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 10bfa6507eb4..5d92b70a5d53 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -76,59 +76,55 @@ static const struct thermal_zone_device_ops imx_sc_thermal_ops = {
static int imx_sc_thermal_probe(struct platform_device *pdev)
{
- struct device_node *np, *child, *sensor_np;
struct imx_sc_sensor *sensor;
- int ret;
+ const int *resource_id;
+ int i, ret;
ret = imx_scu_get_handle(&thermal_ipc_handle);
if (ret)
return ret;
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np)
- return -ENODEV;
+ resource_id = of_device_get_match_data(&pdev->dev);
+ if (!resource_id)
+ return -EINVAL;
- sensor_np = of_node_get(pdev->dev.of_node);
+ for (i = 0; resource_id[i] > 0; i++) {
- for_each_available_child_of_node(np, child) {
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
- if (!sensor) {
- of_node_put(child);
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!sensor)
+ return -ENOMEM;
- ret = thermal_zone_of_get_sensor_id(child,
- sensor_np,
- &sensor->resource_id);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to get valid sensor resource id: %d\n",
- ret);
- of_node_put(child);
- break;
- }
+ sensor->resource_id = resource_id[i];
- sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
- sensor->resource_id,
- sensor,
- &imx_sc_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, sensor->resource_id,
+ sensor, &imx_sc_thermal_ops);
if (IS_ERR(sensor->tzd)) {
- dev_err(&pdev->dev, "failed to register thermal zone\n");
+			/*
+			 * Save the error value before freeing the
+			 * sensor pointer, otherwise we end up with a
+			 * use-after-free error.
+			 */
ret = PTR_ERR(sensor->tzd);
- of_node_put(child);
- break;
+
+ devm_kfree(&pdev->dev, sensor);
+
+			/*
+			 * The thermal framework notifies us that there
+			 * is no thermal zone description for such a
+			 * sensor id.
+			 */
+ if (ret == -ENODEV)
+ continue;
+
+ dev_err(&pdev->dev, "failed to register thermal zone\n");
+ return ret;
}
if (devm_thermal_add_hwmon_sysfs(sensor->tzd))
dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
}
-put_node:
- of_node_put(sensor_np);
- of_node_put(np);
-
- return ret;
+ return 0;
}
static int imx_sc_thermal_remove(struct platform_device *pdev)
@@ -136,8 +132,10 @@ static int imx_sc_thermal_remove(struct platform_device *pdev)
return 0;
}
+static int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
+
static const struct of_device_id imx_sc_thermal_table[] = {
- { .compatible = "fsl,imx-sc-thermal", },
+ { .compatible = "fsl,imx-sc-thermal", .data = imx_sc_sensors },
{}
};
MODULE_DEVICE_TABLE(of, imx_sc_thermal_table);
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index f136cb350238..327f37202c69 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -604,7 +604,7 @@ static const struct tsens_ops ops_8939 = {
struct tsens_plat_data data_8939 = {
.num_sensors = 10,
.ops = &ops_8939,
- .hw_ids = (unsigned int []){ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10 },
+ .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 },
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 4df42d70d867..61c2b8855cb8 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -316,7 +316,7 @@ static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
return 0;
}
-static struct thermal_zone_device_ops rcar_thermal_zone_of_ops = {
+static const struct thermal_zone_device_ops rcar_thermal_zone_of_ops = {
.get_temp = rcar_thermal_get_temp,
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 7e669b60a065..117eeaf7dd24 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1186,7 +1186,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (type && strlen(type) >= THERMAL_NAME_LENGTH) {
+ if (strlen(type) >= THERMAL_NAME_LENGTH) {
pr_err("Thermal zone name (%s) too long, should be under %d chars\n",
type, THERMAL_NAME_LENGTH);
return ERR_PTR(-EINVAL);
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index fd2fb84bf246..d4b6335ace15 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -130,50 +130,6 @@ static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
return -EINVAL;
}
-/**
- * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone
- * @tz_np: a valid thermal zone device node.
- * @sensor_np: a sensor node of a valid sensor device.
- * @id: the sensor ID returned if success.
- *
- * This function will get sensor ID from a given thermal zone node and
- * the sensor node must match the temperature provider @sensor_np.
- *
- * Return: 0 on success, proper error code otherwise.
- */
-
-int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
- struct device_node *sensor_np,
- u32 *id)
-{
- struct of_phandle_args sensor_specs;
- int ret;
-
- ret = of_parse_phandle_with_args(tz_np,
- "thermal-sensors",
- "#thermal-sensor-cells",
- 0,
- &sensor_specs);
- if (ret)
- return ret;
-
- if (sensor_specs.np != sensor_np) {
- of_node_put(sensor_specs.np);
- return -ENODEV;
- }
-
- if (sensor_specs.args_count > 1)
- pr_warn("%pOFn: too many cells in sensor specifier %d\n",
- sensor_specs.np, sensor_specs.args_count);
-
- *id = sensor_specs.args_count ? sensor_specs.args[0] : 0;
-
- of_node_put(sensor_specs.np);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id);
-
/*** functions parsing device tree nodes ***/
static int of_find_trip_id(struct device_node *np, struct device_node *trip)
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 78c5841bdfae..ec495c7dff03 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -128,9 +128,11 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
- ret = tz->ops->set_trip_temp(tz, trip, temperature);
- if (ret)
- return ret;
+ if (tz->ops->set_trip_temp) {
+ ret = tz->ops->set_trip_temp(tz, trip, temperature);
+ if (ret)
+ return ret;
+ }
if (tz->trips)
tz->trips[trip].temperature = temperature;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 33d62d7e3929..9f3c54032556 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
+#include <linux/kmsan.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
@@ -426,6 +427,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
URB_DMA_SG_COMBINED);
urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
+ kmsan_handle_urb(urb, is_out);
if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
dev->state < USB_STATE_CONFIGURED)
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 7badf5777597..febdc99b51a7 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -600,6 +600,11 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
}
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
}
+ if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
+ config.device_features =
+ nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
+ }
/* Skip checking capability if user didn't prefer to configure any
* device networking attributes. It is likely that user might have used
@@ -799,51 +804,76 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
return msg->len;
}
-static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
- struct sk_buff *msg, u64 features,
+static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
const struct virtio_net_config *config)
{
u16 val_u16;
- if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
+ if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
+ (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
return 0;
- val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
+ val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);
+
return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}
+static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_net_config *config)
+{
+ u16 val_u16;
+
+ if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
+ return 0;
+
+ val_u16 = __virtio16_to_cpu(true, config->mtu);
+
+ return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
+}
+
+static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_net_config *config)
+{
+ if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
+ return 0;
+
+ return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
+ sizeof(config->mac), config->mac);
+}
+
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
struct virtio_net_config config = {};
- u64 features;
+ u64 features_device;
u16 val_u16;
- vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
-
- if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
- config.mac))
- return -EMSGSIZE;
+ vdev->config->get_config(vdev, 0, &config, sizeof(config));
val_u16 = __virtio16_to_cpu(true, config.status);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
return -EMSGSIZE;
- val_u16 = __virtio16_to_cpu(true, config.mtu);
- if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
- return -EMSGSIZE;
+ features_device = vdev->config->get_device_features(vdev);
- features = vdev->config->get_driver_features(vdev);
- if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
VDPA_ATTR_PAD))
return -EMSGSIZE;
- return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
+ if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
int flags, struct netlink_ext_ack *extack)
{
+ u64 features_driver;
+ u8 status = 0;
u32 device_id;
void *hdr;
int err;
@@ -867,6 +897,17 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
goto msg_err;
}
+ /* only read driver features after the feature negotiation is done */
+ status = vdev->config->get_status(vdev);
+ if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
+ features_driver = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
+ VDPA_ATTR_PAD)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+ }
+
switch (device_id) {
case VIRTIO_ID_NET:
err = vdpa_dev_net_config_fill(vdev, msg);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 225b7f5d8be3..b071f0d842fb 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -18,6 +18,7 @@
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>
+#include <uapi/linux/vdpa.h>
#include "vdpa_sim.h"
@@ -245,13 +246,22 @@ static const struct dma_map_ops vdpasim_dma_ops = {
static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;
-struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
+ const struct vdpa_dev_set_config *config)
{
const struct vdpa_config_ops *ops;
struct vdpasim *vdpasim;
struct device *dev;
int i, ret = -ENOMEM;
+ if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ if (config->device_features &
+ ~dev_attr->supported_features)
+ return ERR_PTR(-EINVAL);
+ dev_attr->supported_features =
+ config->device_features;
+ }
+
if (batch_mapping)
ops = &vdpasim_batch_config_ops;
else
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 061986f30911..0e78737dcc16 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -71,7 +71,8 @@ struct vdpasim {
spinlock_t iommu_lock;
};
-struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr);
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr,
+ const struct vdpa_dev_set_config *config);
/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index c8bfea3b7db2..c6db1a1baf76 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -383,7 +383,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.work_fn = vdpasim_blk_work;
dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;
- simdev = vdpasim_create(&dev_attr);
+ simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
return PTR_ERR(simdev);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 886449e88502..c3cb225ea469 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -254,7 +254,7 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.work_fn = vdpasim_net_work;
dev_attr.buffer_size = PAGE_SIZE;
- simdev = vdpasim_create(&dev_attr);
+ simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
return PTR_ERR(simdev);
@@ -294,7 +294,8 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.id_table = id_table,
.ops = &vdpasim_net_mgmtdev_ops,
.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
- 1 << VDPA_ATTR_DEV_NET_CFG_MTU),
+ 1 << VDPA_ATTR_DEV_NET_CFG_MTU |
+ 1 << VDPA_ATTR_DEV_FEATURES),
.max_supported_vqs = VDPASIM_NET_VQ_NUM,
.supported_features = VDPASIM_NET_FEATURES,
};
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 04522077735b..d448db0c4de3 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -17,6 +17,7 @@
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>
+#include <uapi/linux/vdpa.h>
#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
@@ -35,6 +36,7 @@ struct vp_vdpa {
struct virtio_pci_modern_device *mdev;
struct vp_vring *vring;
struct vdpa_callback config_cb;
+ u64 device_features;
char msix_name[VP_VDPA_NAME_SIZE];
int config_irq;
int queues;
@@ -66,9 +68,9 @@ static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
- struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- return vp_modern_get_features(mdev);
+ return vp_vdpa->device_features;
}
static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
@@ -475,6 +477,7 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
struct pci_dev *pdev = mdev->pci_dev;
struct device *dev = &pdev->dev;
struct vp_vdpa *vp_vdpa = NULL;
+ u64 device_features;
int ret, i;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
@@ -491,6 +494,20 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
vp_vdpa->mdev = mdev;
+ device_features = vp_modern_get_features(mdev);
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ if (add_config->device_features & ~device_features) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Try to provision features "
+ "that are not supported by the device: "
+ "device_features 0x%llx provisioned 0x%llx\n",
+ device_features, add_config->device_features);
+ goto err;
+ }
+ device_features = add_config->device_features;
+ }
+ vp_vdpa->device_features = device_features;
+
ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
if (ret) {
dev_err(&pdev->dev,
@@ -599,6 +616,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mgtdev->id_table = mdev_id;
mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
mgtdev->supported_features = vp_modern_get_features(mdev);
+ mgtdev->config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
pci_set_master(pdev);
pci_set_drvdata(pdev, vp_vdpa_mgtdev);
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 6130d00252ed..86c381ceb9a1 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -3,6 +3,7 @@ menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
select IOMMU_API
select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
+ select INTERVAL_TREE
help
VFIO provides a framework for secure userspace device drivers.
See Documentation/driver-api/vfio.rst for more details.
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 1a32357592e3..b693a1169286 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
vfio_virqfd-y := virqfd.o
-vfio-y += vfio_main.o
-
obj-$(CONFIG_VFIO) += vfio.o
+
+vfio-y += vfio_main.o \
+ iova_bitmap.o \
+ container.o
+
obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c
new file mode 100644
index 000000000000..d74164abbf40
--- /dev/null
+++ b/drivers/vfio/container.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ *
+ * VFIO container (/dev/vfio/vfio)
+ */
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/capability.h>
+#include <linux/iommu.h>
+#include <linux/miscdevice.h>
+#include <linux/vfio.h>
+#include <uapi/linux/vfio.h>
+
+#include "vfio.h"
+
+struct vfio_container {
+ struct kref kref;
+ struct list_head group_list;
+ struct rw_semaphore group_lock;
+ struct vfio_iommu_driver *iommu_driver;
+ void *iommu_data;
+ bool noiommu;
+};
+
+static struct vfio {
+ struct list_head iommu_drivers_list;
+ struct mutex iommu_drivers_lock;
+} vfio;
+
+#ifdef CONFIG_VFIO_NOIOMMU
+bool vfio_noiommu __read_mostly;
+module_param_named(enable_unsafe_noiommu_mode,
+ vfio_noiommu, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
+#endif
+
+static void *vfio_noiommu_open(unsigned long arg)
+{
+ if (arg != VFIO_NOIOMMU_IOMMU)
+ return ERR_PTR(-EINVAL);
+ if (!capable(CAP_SYS_RAWIO))
+ return ERR_PTR(-EPERM);
+
+ return NULL;
+}
+
+static void vfio_noiommu_release(void *iommu_data)
+{
+}
+
+static long vfio_noiommu_ioctl(void *iommu_data,
+ unsigned int cmd, unsigned long arg)
+{
+ if (cmd == VFIO_CHECK_EXTENSION)
+ return vfio_noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;
+
+ return -ENOTTY;
+}
+
+static int vfio_noiommu_attach_group(void *iommu_data,
+ struct iommu_group *iommu_group, enum vfio_group_type type)
+{
+ return 0;
+}
+
+static void vfio_noiommu_detach_group(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+}
+
+static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
+ .name = "vfio-noiommu",
+ .owner = THIS_MODULE,
+ .open = vfio_noiommu_open,
+ .release = vfio_noiommu_release,
+ .ioctl = vfio_noiommu_ioctl,
+ .attach_group = vfio_noiommu_attach_group,
+ .detach_group = vfio_noiommu_detach_group,
+};
+
+/*
+ * Only noiommu containers can use vfio-noiommu and noiommu containers can only
+ * use vfio-noiommu.
+ */
+static bool vfio_iommu_driver_allowed(struct vfio_container *container,
+ const struct vfio_iommu_driver *driver)
+{
+ if (!IS_ENABLED(CONFIG_VFIO_NOIOMMU))
+ return true;
+ return container->noiommu == (driver->ops == &vfio_noiommu_ops);
+}
+
+/*
+ * IOMMU driver registration
+ */
+int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
+{
+ struct vfio_iommu_driver *driver, *tmp;
+
+ if (WARN_ON(!ops->register_device != !ops->unregister_device))
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ return -ENOMEM;
+
+ driver->ops = ops;
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+
+ /* Check for duplicates */
+ list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
+ if (tmp->ops == ops) {
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ kfree(driver);
+ return -EINVAL;
+ }
+ }
+
+ list_add(&driver->vfio_next, &vfio.iommu_drivers_list);
+
+ mutex_unlock(&vfio.iommu_drivers_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
+
+void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
+{
+ struct vfio_iommu_driver *driver;
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
+ if (driver->ops == ops) {
+ list_del(&driver->vfio_next);
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ kfree(driver);
+ return;
+ }
+ }
+ mutex_unlock(&vfio.iommu_drivers_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
+
+/*
+ * Container objects - containers are created when /dev/vfio/vfio is
+ * opened, but their lifecycle extends until the last user is done, so
+ * it's freed via kref. Must support container/group/device being
+ * closed in any order.
+ */
+static void vfio_container_release(struct kref *kref)
+{
+ struct vfio_container *container;
+ container = container_of(kref, struct vfio_container, kref);
+
+ kfree(container);
+}
+
+static void vfio_container_get(struct vfio_container *container)
+{
+ kref_get(&container->kref);
+}
+
+static void vfio_container_put(struct vfio_container *container)
+{
+ kref_put(&container->kref, vfio_container_release);
+}
+
+void vfio_device_container_register(struct vfio_device *device)
+{
+ struct vfio_iommu_driver *iommu_driver =
+ device->group->container->iommu_driver;
+
+ if (iommu_driver && iommu_driver->ops->register_device)
+ iommu_driver->ops->register_device(
+ device->group->container->iommu_data, device);
+}
+
+void vfio_device_container_unregister(struct vfio_device *device)
+{
+ struct vfio_iommu_driver *iommu_driver =
+ device->group->container->iommu_driver;
+
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
+}
+
+long vfio_container_ioctl_check_extension(struct vfio_container *container,
+ unsigned long arg)
+{
+ struct vfio_iommu_driver *driver;
+ long ret = 0;
+
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+
+ switch (arg) {
+ /* No base extensions yet */
+ default:
+ /*
+ * If no driver is set, poll all registered drivers for
+ * extensions and return the first positive result. If
+ * a driver is already set, further queries will be passed
+ * only to that driver.
+ */
+ if (!driver) {
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list,
+ vfio_next) {
+
+ if (!list_empty(&container->group_list) &&
+ !vfio_iommu_driver_allowed(container,
+ driver))
+ continue;
+ if (!try_module_get(driver->ops->owner))
+ continue;
+
+ ret = driver->ops->ioctl(NULL,
+ VFIO_CHECK_EXTENSION,
+ arg);
+ module_put(driver->ops->owner);
+ if (ret > 0)
+ break;
+ }
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ } else
+ ret = driver->ops->ioctl(container->iommu_data,
+ VFIO_CHECK_EXTENSION, arg);
+ }
+
+ up_read(&container->group_lock);
+
+ return ret;
+}
+
+/* hold write lock on container->group_lock */
+static int __vfio_container_attach_groups(struct vfio_container *container,
+ struct vfio_iommu_driver *driver,
+ void *data)
+{
+ struct vfio_group *group;
+ int ret = -ENODEV;
+
+ list_for_each_entry(group, &container->group_list, container_next) {
+ ret = driver->ops->attach_group(data, group->iommu_group,
+ group->type);
+ if (ret)
+ goto unwind;
+ }
+
+ return ret;
+
+unwind:
+ list_for_each_entry_continue_reverse(group, &container->group_list,
+ container_next) {
+ driver->ops->detach_group(data, group->iommu_group);
+ }
+
+ return ret;
+}
+
+static long vfio_ioctl_set_iommu(struct vfio_container *container,
+ unsigned long arg)
+{
+ struct vfio_iommu_driver *driver;
+ long ret = -ENODEV;
+
+ down_write(&container->group_lock);
+
+ /*
+ * The container is designed to be an unprivileged interface while
+ * the group can be assigned to specific users. Therefore, only by
+ * adding a group to a container does the user get the privilege of
+ * enabling the iommu, which may allocate finite resources. There
+ * is no unset_iommu, but by removing all the groups from a container,
+ * the container is deprivileged and returns to an unset state.
+ */
+ if (list_empty(&container->group_list) || container->iommu_driver) {
+ up_write(&container->group_lock);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
+ void *data;
+
+ if (!vfio_iommu_driver_allowed(container, driver))
+ continue;
+ if (!try_module_get(driver->ops->owner))
+ continue;
+
+ /*
+ * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
+ * so test which iommu driver reported support for this
+ * extension and call open on them. We also pass them the
+ * magic, allowing a single driver to support multiple
+ * interfaces if they'd like.
+ */
+ if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
+ module_put(driver->ops->owner);
+ continue;
+ }
+
+ data = driver->ops->open(arg);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ module_put(driver->ops->owner);
+ continue;
+ }
+
+ ret = __vfio_container_attach_groups(container, driver, data);
+ if (ret) {
+ driver->ops->release(data);
+ module_put(driver->ops->owner);
+ continue;
+ }
+
+ container->iommu_driver = driver;
+ container->iommu_data = data;
+ break;
+ }
+
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ up_write(&container->group_lock);
+
+ return ret;
+}
+
+static long vfio_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver;
+ void *data;
+ long ret = -EINVAL;
+
+ if (!container)
+ return ret;
+
+ switch (cmd) {
+ case VFIO_GET_API_VERSION:
+ ret = VFIO_API_VERSION;
+ break;
+ case VFIO_CHECK_EXTENSION:
+ ret = vfio_container_ioctl_check_extension(container, arg);
+ break;
+ case VFIO_SET_IOMMU:
+ ret = vfio_ioctl_set_iommu(container, arg);
+ break;
+ default:
+ driver = container->iommu_driver;
+ data = container->iommu_data;
+
+ if (driver) /* passthrough all unrecognized ioctls */
+ ret = driver->ops->ioctl(data, cmd, arg);
+ }
+
+ return ret;
+}
+
+static int vfio_fops_open(struct inode *inode, struct file *filep)
+{
+ struct vfio_container *container;
+
+ container = kzalloc(sizeof(*container), GFP_KERNEL);
+ if (!container)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&container->group_list);
+ init_rwsem(&container->group_lock);
+ kref_init(&container->kref);
+
+ filep->private_data = container;
+
+ return 0;
+}
+
+static int vfio_fops_release(struct inode *inode, struct file *filep)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver = container->iommu_driver;
+
+ if (driver && driver->ops->notify)
+ driver->ops->notify(container->iommu_data,
+ VFIO_IOMMU_CONTAINER_CLOSE);
+
+ filep->private_data = NULL;
+
+ vfio_container_put(container);
+
+ return 0;
+}
+
+static const struct file_operations vfio_fops = {
+ .owner = THIS_MODULE,
+ .open = vfio_fops_open,
+ .release = vfio_fops_release,
+ .unlocked_ioctl = vfio_fops_unl_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+struct vfio_container *vfio_container_from_file(struct file *file)
+{
+ struct vfio_container *container;
+
+ /* Sanity check, is this really our fd? */
+ if (file->f_op != &vfio_fops)
+ return NULL;
+
+ container = file->private_data;
+ WARN_ON(!container); /* fget ensures we don't race vfio_release */
+ return container;
+}
+
+static struct miscdevice vfio_dev = {
+ .minor = VFIO_MINOR,
+ .name = "vfio",
+ .fops = &vfio_fops,
+ .nodename = "vfio/vfio",
+ .mode = S_IRUGO | S_IWUGO,
+};
+
+int vfio_container_attach_group(struct vfio_container *container,
+ struct vfio_group *group)
+{
+ struct vfio_iommu_driver *driver;
+ int ret = 0;
+
+ lockdep_assert_held(&group->group_lock);
+
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ down_write(&container->group_lock);
+
+ /* Real groups and fake groups cannot mix */
+ if (!list_empty(&container->group_list) &&
+ container->noiommu != (group->type == VFIO_NO_IOMMU)) {
+ ret = -EPERM;
+ goto out_unlock_container;
+ }
+
+ if (group->type == VFIO_IOMMU) {
+ ret = iommu_group_claim_dma_owner(group->iommu_group, group);
+ if (ret)
+ goto out_unlock_container;
+ }
+
+ driver = container->iommu_driver;
+ if (driver) {
+ ret = driver->ops->attach_group(container->iommu_data,
+ group->iommu_group,
+ group->type);
+ if (ret) {
+ if (group->type == VFIO_IOMMU)
+ iommu_group_release_dma_owner(
+ group->iommu_group);
+ goto out_unlock_container;
+ }
+ }
+
+ group->container = container;
+ group->container_users = 1;
+ container->noiommu = (group->type == VFIO_NO_IOMMU);
+ list_add(&group->container_next, &container->group_list);
+
+ /* Get a reference on the container and mark a user within the group */
+ vfio_container_get(container);
+
+out_unlock_container:
+ up_write(&container->group_lock);
+ return ret;
+}
+
+void vfio_group_detach_container(struct vfio_group *group)
+{
+ struct vfio_container *container = group->container;
+ struct vfio_iommu_driver *driver;
+
+ lockdep_assert_held(&group->group_lock);
+ WARN_ON(group->container_users != 1);
+
+ down_write(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (driver)
+ driver->ops->detach_group(container->iommu_data,
+ group->iommu_group);
+
+ if (group->type == VFIO_IOMMU)
+ iommu_group_release_dma_owner(group->iommu_group);
+
+ group->container = NULL;
+ group->container_users = 0;
+ list_del(&group->container_next);
+
+ /* Detaching the last group deprivileges a container, remove iommu */
+ if (driver && list_empty(&container->group_list)) {
+ driver->ops->release(container->iommu_data);
+ module_put(driver->ops->owner);
+ container->iommu_driver = NULL;
+ container->iommu_data = NULL;
+ }
+
+ up_write(&container->group_lock);
+
+ vfio_container_put(container);
+}
+
+int vfio_device_assign_container(struct vfio_device *device)
+{
+ struct vfio_group *group = device->group;
+
+ lockdep_assert_held(&group->group_lock);
+
+ if (!group->container || !group->container->iommu_driver ||
+ WARN_ON(!group->container_users))
+ return -EINVAL;
+
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ get_file(group->opened_file);
+ group->container_users++;
+ return 0;
+}
+
+void vfio_device_unassign_container(struct vfio_device *device)
+{
+ mutex_lock(&device->group->group_lock);
+ WARN_ON(device->group->container_users <= 1);
+ device->group->container_users--;
+ fput(device->group->opened_file);
+ mutex_unlock(&device->group->group_lock);
+}
+
+/*
+ * Pin contiguous user pages and return their associated host pages for local
+ * domain only.
+ * @device [in] : device
+ * @iova [in] : starting IOVA of user pages to be pinned.
+ * @npage [in] : count of pages to be pinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @prot [in] : protection flags
+ * @pages[out] : array of host pages
+ * Return error code on failure or the number of pages pinned on success.
+ *
+ * A driver may only call this function if the vfio_device was created
+ * by vfio_register_emulated_iommu_dev().
+ */
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages)
+{
+ struct vfio_container *container;
+ struct vfio_group *group = device->group;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!pages || !npage || !vfio_assert_device_open(device))
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ /* group->container cannot change while a vfio device is open */
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->pin_pages))
+ ret = driver->ops->pin_pages(container->iommu_data,
+ group->iommu_group, iova,
+ npage, prot, pages);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_pin_pages);
+
+/*
+ * Unpin contiguous host pages for local domain only.
+ * @device [in] : device
+ * @iova [in] : starting address of user pages to be unpinned.
+ * @npage [in] : count of pages to be unpinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ */
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+
+ if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
+ return;
+
+ if (WARN_ON(!vfio_assert_device_open(device)))
+ return;
+
+ /* group->container cannot change while a vfio device is open */
+ container = device->group->container;
+ driver = container->iommu_driver;
+
+ driver->ops->unpin_pages(container->iommu_data, iova, npage);
+}
+EXPORT_SYMBOL(vfio_unpin_pages);
+
+/*
+ * This interface allows the CPUs to perform some sort of virtual DMA on
+ * behalf of the device.
+ *
+ * CPUs read from or write into a range of IOVAs pointing to user space
+ * memory, using a kernel buffer as the destination or source respectively.
+ *
+ * As the read/write of user space memory is conducted via the CPUs and is
+ * not a real device DMA, it is not necessary to pin the user space memory.
+ *
+ * @device [in] : VFIO device
+ * @iova [in] : base IOVA of a user space buffer
+ * @data [in] : pointer to kernel buffer
+ * @len [in] : kernel buffer length
+ * @write : indicate read or write
+ * Return error code on failure or 0 on success.
+ */
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
+ size_t len, bool write)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret = 0;
+
+ if (!data || len <= 0 || !vfio_assert_device_open(device))
+ return -EINVAL;
+
+ /* group->container cannot change while a vfio device is open */
+ container = device->group->container;
+ driver = container->iommu_driver;
+
+ if (likely(driver && driver->ops->dma_rw))
+ ret = driver->ops->dma_rw(container->iommu_data,
+ iova, data, len, write);
+ else
+ ret = -ENOTTY;
+ return ret;
+}
+EXPORT_SYMBOL(vfio_dma_rw);
+
+int __init vfio_container_init(void)
+{
+ int ret;
+
+ mutex_init(&vfio.iommu_drivers_lock);
+ INIT_LIST_HEAD(&vfio.iommu_drivers_list);
+
+ ret = misc_register(&vfio_dev);
+ if (ret) {
+ pr_err("vfio: misc device register failed\n");
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_VFIO_NOIOMMU)) {
+ ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
+ if (ret)
+ goto err_misc;
+ }
+ return 0;
+
+err_misc:
+ misc_deregister(&vfio_dev);
+ return ret;
+}
+
+void vfio_container_cleanup(void)
+{
+ if (IS_ENABLED(CONFIG_VFIO_NOIOMMU))
+ vfio_unregister_iommu_driver(&vfio_noiommu_ops);
+ misc_deregister(&vfio_dev);
+ mutex_destroy(&vfio.iommu_drivers_lock);
+}
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 3feff729f3ce..b16874e913e4 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -108,9 +108,9 @@ static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
/* reset the device before cleaning up the interrupts */
ret = vfio_fsl_mc_reset_device(vdev);
- if (WARN_ON(ret))
+ if (ret)
dev_warn(&mc_cont->dev,
- "VFIO_FLS_MC: reset device has failed (%d)\n", ret);
+ "VFIO_FSL_MC: reset device has failed (%d)\n", ret);
vfio_fsl_mc_irqs_cleanup(vdev);
@@ -418,16 +418,7 @@ static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}
-static const struct vfio_device_ops vfio_fsl_mc_ops = {
- .name = "vfio-fsl-mc",
- .open_device = vfio_fsl_mc_open_device,
- .close_device = vfio_fsl_mc_close_device,
- .ioctl = vfio_fsl_mc_ioctl,
- .read = vfio_fsl_mc_read,
- .write = vfio_fsl_mc_write,
- .mmap = vfio_fsl_mc_mmap,
-};
-
+static const struct vfio_device_ops vfio_fsl_mc_ops;
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -518,35 +509,43 @@ static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}
-static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+static int vfio_fsl_mc_init_dev(struct vfio_device *core_vdev)
{
- struct vfio_fsl_mc_device *vdev;
- struct device *dev = &mc_dev->dev;
+ struct vfio_fsl_mc_device *vdev =
+ container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(core_vdev->dev);
int ret;
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
-
- vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops);
vdev->mc_dev = mc_dev;
mutex_init(&vdev->igate);
if (is_fsl_mc_bus_dprc(mc_dev))
- ret = vfio_assign_device_set(&vdev->vdev, &mc_dev->dev);
+ ret = vfio_assign_device_set(core_vdev, &mc_dev->dev);
else
- ret = vfio_assign_device_set(&vdev->vdev, mc_dev->dev.parent);
- if (ret)
- goto out_uninit;
+ ret = vfio_assign_device_set(core_vdev, mc_dev->dev.parent);
- ret = vfio_fsl_mc_init_device(vdev);
if (ret)
- goto out_uninit;
+ return ret;
+
+ /* device_set is released by vfio core if @init fails */
+ return vfio_fsl_mc_init_device(vdev);
+}
+
+static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+{
+ struct vfio_fsl_mc_device *vdev;
+ struct device *dev = &mc_dev->dev;
+ int ret;
+
+ vdev = vfio_alloc_device(vfio_fsl_mc_device, vdev, dev,
+ &vfio_fsl_mc_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
ret = vfio_register_group_dev(&vdev->vdev);
if (ret) {
dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
- goto out_device;
+ goto out_put_vdev;
}
ret = vfio_fsl_mc_scan_container(mc_dev);
@@ -557,30 +556,44 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
out_group_dev:
vfio_unregister_group_dev(&vdev->vdev);
-out_device:
- vfio_fsl_uninit_device(vdev);
-out_uninit:
- vfio_uninit_group_dev(&vdev->vdev);
- kfree(vdev);
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
return ret;
}
+static void vfio_fsl_mc_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_fsl_mc_device *vdev =
+ container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
+
+ vfio_fsl_uninit_device(vdev);
+ mutex_destroy(&vdev->igate);
+ vfio_free_device(core_vdev);
+}
+
static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);
vfio_unregister_group_dev(&vdev->vdev);
- mutex_destroy(&vdev->igate);
-
dprc_remove_devices(mc_dev, NULL, 0);
- vfio_fsl_uninit_device(vdev);
-
- vfio_uninit_group_dev(&vdev->vdev);
- kfree(vdev);
+ vfio_put_device(&vdev->vdev);
return 0;
}
+static const struct vfio_device_ops vfio_fsl_mc_ops = {
+ .name = "vfio-fsl-mc",
+ .init = vfio_fsl_mc_init_dev,
+ .release = vfio_fsl_mc_release_dev,
+ .open_device = vfio_fsl_mc_open_device,
+ .close_device = vfio_fsl_mc_close_device,
+ .ioctl = vfio_fsl_mc_ioctl,
+ .read = vfio_fsl_mc_read,
+ .write = vfio_fsl_mc_write,
+ .mmap = vfio_fsl_mc_mmap,
+};
+
static struct fsl_mc_driver vfio_fsl_mc_driver = {
.probe = vfio_fsl_mc_probe,
.remove = vfio_fsl_mc_remove,
diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
new file mode 100644
index 000000000000..6631e8befe1b
--- /dev/null
+++ b/drivers/vfio/iova_bitmap.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#include <linux/iova_bitmap.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
+
+/*
+ * struct iova_bitmap_map - A bitmap representing an IOVA range
+ *
+ * Main data structure for tracking mapped user pages of bitmap data.
+ *
+ * For example, for something recording dirty IOVAs, it will be provided a
+ * struct iova_bitmap structure, as a general structure for iterating the
+ * total IOVA range. The struct iova_bitmap_map, though, represents the
+ * subset of said IOVA space that is pinned by its parent structure (struct
+ * iova_bitmap).
+ *
+ * The user does not need to know the exact location of the bits in the
+ * bitmap.
+ * From user perspective the only API available is iova_bitmap_set() which
+ * records the IOVA *range* in the bitmap by setting the corresponding
+ * bits.
+ *
+ * The bitmap is an array of u64 where each bit represents an IOVA range
+ * of (1 << pgshift). Thus the formula for the bitmap data bit to set is:
+ *
+ *   data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
+ */
+struct iova_bitmap_map {
+ /* base IOVA representing bit 0 of the first page */
+ unsigned long iova;
+
+	/* page shift (log2 of the page size) that each bit represents */
+ unsigned long pgshift;
+
+ /* page offset of the first user page pinned */
+ unsigned long pgoff;
+
+ /* number of pages pinned */
+ unsigned long npages;
+
+ /* pinned pages representing the bitmap data */
+ struct page **pages;
+};
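To make the formula above concrete, a small standalone sketch of the index arithmetic, assuming a 4K bit granularity (pgshift == 12); the IOVA value is illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long pgshift = 12;          /* each bit covers 4K of IOVA */
	unsigned long iova = 0x11000;        /* relative IOVA to record */

	unsigned long bit = iova >> pgshift; /* 0x11000 / 4K = bit 17 */
	unsigned long word = bit / 64;       /* u64 word index: 0 */
	unsigned long shift = bit % 64;      /* bit within the word: 17 */

	printf("word %lu, mask 0x%016llx\n", word,
	       (unsigned long long)(1ULL << shift));
	return 0;
}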
+
+/*
+ * struct iova_bitmap - The IOVA bitmap object
+ *
+ * Main data structure for iterating over the bitmap data.
+ *
+ * Abstracts the pinning work and iterates in IOVA ranges.
+ * It uses a windowing scheme and pins the bitmap in relatively
+ * big ranges.
+ *
+ * The bitmap object uses one base page to store all the pinned page
+ * pointers related to the bitmap. For sizeof(struct page *) == 8 it stores
+ * 512 struct page pointers, which, with a 4K base page size, means 2M of
+ * bitmap data is pinned at a time. If the iova_bitmap page size is also
+ * 4K, the range window to iterate is 64G.
+ *
+ * For example, iterating over a total IOVA range of 4G..128G, it will walk
+ * through this set of ranges:
+ *
+ * 4G - 68G-1 (64G)
+ * 68G - 128G-1 (64G)
+ *
+ * An example of the APIs on how to use/iterate over the IOVA bitmap:
+ *
+ * bitmap = iova_bitmap_alloc(iova, length, page_size, data);
+ * if (IS_ERR(bitmap))
+ * return PTR_ERR(bitmap);
+ *
+ * ret = iova_bitmap_for_each(bitmap, arg, dirty_reporter_fn);
+ *
+ * iova_bitmap_free(bitmap);
+ *
+ * On each iteration, @dirty_reporter_fn is called with a unique @iova
+ * and @length argument, indicating the current range available through
+ * the iova_bitmap. @dirty_reporter_fn uses iova_bitmap_set() to mark
+ * dirty areas (@iova_length) within that provided range, as follows:
+ *
+ * iova_bitmap_set(bitmap, iova, iova_length);
+ *
+ * Internally, the object uses an index @mapped_base_index that tracks
+ * which u64 word of the bitmap is currently mapped, up to
+ * @mapped_total_index. The index keeps being incremented until
+ * @mapped_total_index is reached, while mapping at most
+ * PAGE_SIZE / sizeof(struct page *) pages at a time.
+ *
+ * The IOVA bitmap usually lives alongside whatever tracks DMA-mapped
+ * ranges, or some other form of IOVA range tracking that correlates
+ * with the user-passed bitmap.
+ */
+struct iova_bitmap {
+ /* IOVA range representing the currently mapped bitmap data */
+ struct iova_bitmap_map mapped;
+
+ /* userspace address of the bitmap */
+ u64 __user *bitmap;
+
+ /* u64 index that @mapped points to */
+ unsigned long mapped_base_index;
+
+ /* how many u64 can we walk in total */
+ unsigned long mapped_total_index;
+
+ /* base IOVA of the whole bitmap */
+ unsigned long iova;
+
+ /* length of the IOVA range for the whole bitmap */
+ size_t length;
+};
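To make the usage comment above concrete, a minimal sketch of what a @dirty_reporter_fn might look like; the tracker structure and its single dirty range are hypothetical stand-ins for whatever actually records dirty DMA ranges:

#include <linux/iova_bitmap.h>

/* Hypothetical per-device tracker of one dirty range, passed as @opaque. */
struct my_dirty_tracker {
	unsigned long dirty_iova;
	size_t dirty_len;
};

/* Matches iova_bitmap_fn_t: called once per mapped window of the bitmap. */
static int my_dirty_reporter(struct iova_bitmap *bitmap, unsigned long iova,
			     size_t length, void *opaque)
{
	struct my_dirty_tracker *t = opaque;
	size_t len;

	/* Only report the part of the dirty range inside [iova, iova + length) */
	if (t->dirty_iova < iova || t->dirty_iova >= iova + length)
		return 0;

	len = iova + length - t->dirty_iova;
	if (len > t->dirty_len)
		len = t->dirty_len;

	iova_bitmap_set(bitmap, t->dirty_iova, len);
	return 0;
}

/* Usage: ret = iova_bitmap_for_each(bitmap, &tracker, my_dirty_reporter); */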
+
+/*
+ * Converts a relative IOVA to a bitmap index.
+ * This function provides the index into the u64 array (bitmap::bitmap)
+ * for a given IOVA offset.
+ * Relative IOVA means relative to the bitmap::mapped base IOVA
+ * (stored in mapped::iova). All computations in this file are done using
+ * relative IOVAs and thus avoid an extra subtraction against mapped::iova.
+ * The user API iova_bitmap_set() always uses regular absolute IOVAs.
+ */
+static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
+ unsigned long iova)
+{
+ unsigned long pgsize = 1 << bitmap->mapped.pgshift;
+
+ return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
+}
+
+/*
+ * Converts a bitmap index to a *relative* IOVA.
+ */
+static unsigned long iova_bitmap_index_to_offset(struct iova_bitmap *bitmap,
+ unsigned long index)
+{
+ unsigned long pgshift = bitmap->mapped.pgshift;
+
+ return (index * BITS_PER_TYPE(*bitmap->bitmap)) << pgshift;
+}
+
+/*
+ * Returns the base IOVA of the mapped range.
+ */
+static unsigned long iova_bitmap_mapped_iova(struct iova_bitmap *bitmap)
+{
+ unsigned long skip = bitmap->mapped_base_index;
+
+ return bitmap->iova + iova_bitmap_index_to_offset(bitmap, skip);
+}
+
+/*
+ * Pins the bitmap user pages for the current range window.
+ * This is internal to IOVA bitmap and called when advancing the
+ * index (@mapped_base_index) or allocating the bitmap.
+ */
+static int iova_bitmap_get(struct iova_bitmap *bitmap)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+ unsigned long npages;
+ u64 __user *addr;
+ long ret;
+
+ /*
+ * @mapped_base_index is the index of the currently mapped u64 words
+	 * that we have access to. Anything before @mapped_base_index is not
+ * mapped. The range @mapped_base_index .. @mapped_total_index-1 is
+ * mapped but capped at a maximum number of pages.
+ */
+ npages = DIV_ROUND_UP((bitmap->mapped_total_index -
+ bitmap->mapped_base_index) *
+ sizeof(*bitmap->bitmap), PAGE_SIZE);
+
+ /*
+	 * We always cap at the maximum number of 'struct page' pointers that
+	 * a base page can fit. On x86, for example, this means at most 2M of
+	 * bitmap data.
+ */
+ npages = min(npages, PAGE_SIZE / sizeof(struct page *));
+
+ /*
+ * Bitmap address to be pinned is calculated via pointer arithmetic
+ * with bitmap u64 word index.
+ */
+ addr = bitmap->bitmap + bitmap->mapped_base_index;
+
+ ret = pin_user_pages_fast((unsigned long)addr, npages,
+ FOLL_WRITE, mapped->pages);
+ if (ret <= 0)
+ return -EFAULT;
+
+ mapped->npages = (unsigned long)ret;
+ /* Base IOVA where @pages point to i.e. bit 0 of the first page */
+ mapped->iova = iova_bitmap_mapped_iova(bitmap);
+
+ /*
+	 * Offset into the first pinned page where bit 0 is located.
+ * This handles the case where the bitmap is not PAGE_SIZE
+ * aligned.
+ */
+ mapped->pgoff = offset_in_page(addr);
+ return 0;
+}
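The DIV_ROUND_UP and min() computations above bound the pinned window. Plugging in typical numbers (4K pages, 8-byte pointers) reproduces the figures quoted in the struct iova_bitmap comment; a tiny standalone check:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long ptr_size = 8;                 /* sizeof(struct page *) */

	unsigned long max_pages = page_size / ptr_size;     /* 512 pages */
	unsigned long bitmap_bytes = max_pages * page_size; /* 2M of bitmap */
	/* each bitmap byte covers 8 bits * page_size of IOVA */
	unsigned long long window = (unsigned long long)bitmap_bytes * 8 * page_size;

	printf("%lu pages, %lu bytes of bitmap, %lluG window\n",
	       max_pages, bitmap_bytes, window >> 30);      /* 512, 2097152, 64G */
	return 0;
}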
+
+/*
+ * Unpins the bitmap user pages and clears @npages.
+ * (Un)pinning is abstracted from the API user and is done when advancing
+ * the index or freeing the bitmap.
+ */
+static void iova_bitmap_put(struct iova_bitmap *bitmap)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+
+ if (mapped->npages) {
+ unpin_user_pages(mapped->pages, mapped->npages);
+ mapped->npages = 0;
+ }
+}
+
+/**
+ * iova_bitmap_alloc() - Allocates an IOVA bitmap object
+ * @iova: Start address of the IOVA range
+ * @length: Length of the IOVA range
+ * @page_size: Page size of the IOVA bitmap. It defines what each bit
+ * granularity represents
+ * @data: Userspace address of the bitmap
+ *
+ * Allocates an IOVA object and initializes all its fields including the
+ * first user pages of @data.
+ *
+ * Return: A pointer to a newly allocated struct iova_bitmap
+ * or ERR_PTR() on error.
+ */
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size, u64 __user *data)
+{
+ struct iova_bitmap_map *mapped;
+ struct iova_bitmap *bitmap;
+ int rc;
+
+ bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
+ if (!bitmap)
+ return ERR_PTR(-ENOMEM);
+
+ mapped = &bitmap->mapped;
+ mapped->pgshift = __ffs(page_size);
+ bitmap->bitmap = data;
+ bitmap->mapped_total_index =
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ bitmap->iova = iova;
+ bitmap->length = length;
+ mapped->iova = iova;
+ mapped->pages = (struct page **)__get_free_page(GFP_KERNEL);
+ if (!mapped->pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = iova_bitmap_get(bitmap);
+ if (rc)
+ goto err;
+ return bitmap;
+
+err:
+ iova_bitmap_free(bitmap);
+ return ERR_PTR(rc);
+}
+
+/**
+ * iova_bitmap_free() - Frees an IOVA bitmap object
+ * @bitmap: IOVA bitmap to free
+ *
+ * It unpins and releases pages array memory and clears any leftover
+ * state.
+ */
+void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+
+ iova_bitmap_put(bitmap);
+
+ if (mapped->pages) {
+ free_page((unsigned long)mapped->pages);
+ mapped->pages = NULL;
+ }
+
+ kfree(bitmap);
+}
+
+/*
+ * Returns how many bitmap indexes, out of @mapped_total_index, remain to
+ * be processed for the currently pinned bitmap pages.
+ */
+static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+{
+ unsigned long remaining;
+
+ remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+ remaining = min_t(unsigned long, remaining,
+ (bitmap->mapped.npages << PAGE_SHIFT) / sizeof(*bitmap->bitmap));
+
+ return remaining;
+}
+
+/*
+ * Returns the length of the mapped IOVA range.
+ */
+static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap)
+{
+ unsigned long max_iova = bitmap->iova + bitmap->length - 1;
+ unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+ unsigned long remaining;
+
+ /*
+	 * iova_bitmap_mapped_remaining() returns a number of indexes which,
+	 * when converted to IOVA, gives us the max length that the pinned
+	 * bitmap data can cover. That is then capped so that it only covers
+	 * the IOVA range @bitmap::iova .. @bitmap::iova + @bitmap::length - 1.
+ */
+ remaining = iova_bitmap_index_to_offset(bitmap,
+ iova_bitmap_mapped_remaining(bitmap));
+
+ if (iova + remaining - 1 > max_iova)
+ remaining -= ((iova + remaining - 1) - max_iova);
+
+ return remaining;
+}
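The final subtraction above trims the mapped length so the reported window never runs past the end of the overall range. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned long iova = 60UL << 30;           /* window starts at 60G */
	unsigned long max_iova = (64UL << 30) - 1; /* bitmap ends at 64G - 1 */
	unsigned long remaining = 8UL << 30;       /* pinned pages could cover 8G */

	if (iova + remaining - 1 > max_iova)
		remaining -= (iova + remaining - 1) - max_iova;

	printf("%luG\n", remaining >> 30);         /* 4G: clipped to range end */
	return 0;
}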
+
+/*
+ * Returns true if there is no more data to iterate.
+ */
+static bool iova_bitmap_done(struct iova_bitmap *bitmap)
+{
+ return bitmap->mapped_base_index >= bitmap->mapped_total_index;
+}
+
+/*
+ * Advances to the next range, releases the current pinned
+ * pages and pins the next set of bitmap pages.
+ * Returns 0 on success, otherwise an errno.
+ */
+static int iova_bitmap_advance(struct iova_bitmap *bitmap)
+{
+ unsigned long iova = iova_bitmap_mapped_length(bitmap) - 1;
+ unsigned long count = iova_bitmap_offset_to_index(bitmap, iova) + 1;
+
+ bitmap->mapped_base_index += count;
+
+ iova_bitmap_put(bitmap);
+ if (iova_bitmap_done(bitmap))
+ return 0;
+
+ /* When advancing the index we pin the next set of bitmap pages */
+ return iova_bitmap_get(bitmap);
+}
+
+/**
+ * iova_bitmap_for_each() - Iterates over the bitmap
+ * @bitmap: IOVA bitmap to iterate
+ * @opaque: Additional argument to pass to the callback
+ * @fn: Function that gets called for each IOVA range
+ *
+ * Helper function to iterate over bitmap data representing a portion of IOVA
+ * space. It hides the complexity of iterating bitmaps and translating the
+ * mapped bitmap user pages into IOVA ranges to process.
+ *
+ * Return: 0 on success, and an error on failure either upon
+ * iteration or when the callback returns an error.
+ */
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn)
+{
+ int ret = 0;
+
+ for (; !iova_bitmap_done(bitmap) && !ret;
+ ret = iova_bitmap_advance(bitmap)) {
+ ret = fn(bitmap, iova_bitmap_mapped_iova(bitmap),
+ iova_bitmap_mapped_length(bitmap), opaque);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * iova_bitmap_set() - Records an IOVA range in bitmap
+ * @bitmap: IOVA bitmap
+ * @iova: IOVA to start
+ * @length: IOVA range length
+ *
+ * Set the bits corresponding to the range [iova .. iova+length-1] in
+ * the user bitmap.
+ */
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+ unsigned long offset = (iova - mapped->iova) >> mapped->pgshift;
+ unsigned long nbits = max_t(unsigned long, 1, length >> mapped->pgshift);
+ unsigned long page_idx = offset / BITS_PER_PAGE;
+ unsigned long page_offset = mapped->pgoff;
+ void *kaddr;
+
+ offset = offset % BITS_PER_PAGE;
+
+ do {
+ unsigned long size = min(BITS_PER_PAGE - offset, nbits);
+
+ kaddr = kmap_local_page(mapped->pages[page_idx]);
+ bitmap_set(kaddr + page_offset, offset, size);
+ kunmap_local(kaddr);
+ page_offset = offset = 0;
+ nbits -= size;
+ page_idx++;
+ } while (nbits > 0);
+}
+EXPORT_SYMBOL_GPL(iova_bitmap_set);
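A hypothetical call, to illustrate the loop above: with 4K bit granularity, marking an 8M range sets 2048 bits, which can straddle the BITS_PER_PAGE boundary and therefore take two iterations of the kmap_local_page()/bitmap_set() loop:

/* Marks [0x7ff000000, 0x7ff000000 + 8M) dirty: 2048 bits at 4K per bit. */
iova_bitmap_set(bitmap, 0x7ff000000UL, 8 * 1024 * 1024);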
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b8b9e7911e55..58f91b3bd670 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -8,9 +8,7 @@
*/
#include <linux/module.h>
-#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
@@ -20,71 +18,11 @@
#define DRIVER_AUTHOR "NVIDIA Corporation"
#define DRIVER_DESC "Mediated device Core Driver"
-static LIST_HEAD(parent_list);
-static DEFINE_MUTEX(parent_list_lock);
static struct class_compat *mdev_bus_compat_class;
static LIST_HEAD(mdev_list);
static DEFINE_MUTEX(mdev_list_lock);
-struct device *mdev_parent_dev(struct mdev_device *mdev)
-{
- return mdev->type->parent->dev;
-}
-EXPORT_SYMBOL(mdev_parent_dev);
-
-/*
- * Return the index in supported_type_groups that this mdev_device was created
- * from.
- */
-unsigned int mdev_get_type_group_id(struct mdev_device *mdev)
-{
- return mdev->type->type_group_id;
-}
-EXPORT_SYMBOL(mdev_get_type_group_id);
-
-/*
- * Used in mdev_type_attribute sysfs functions to return the index in the
- * supported_type_groups that the sysfs is called from.
- */
-unsigned int mtype_get_type_group_id(struct mdev_type *mtype)
-{
- return mtype->type_group_id;
-}
-EXPORT_SYMBOL(mtype_get_type_group_id);
-
-/*
- * Used in mdev_type_attribute sysfs functions to return the parent struct
- * device
- */
-struct device *mtype_get_parent_dev(struct mdev_type *mtype)
-{
- return mtype->parent->dev;
-}
-EXPORT_SYMBOL(mtype_get_parent_dev);
-
-/* Should be called holding parent_list_lock */
-static struct mdev_parent *__find_parent_device(struct device *dev)
-{
- struct mdev_parent *parent;
-
- list_for_each_entry(parent, &parent_list, next) {
- if (parent->dev == dev)
- return parent;
- }
- return NULL;
-}
-
-void mdev_release_parent(struct kref *kref)
-{
- struct mdev_parent *parent = container_of(kref, struct mdev_parent,
- ref);
- struct device *dev = parent->dev;
-
- kfree(parent);
- put_device(dev);
-}
-
/* Caller must hold parent unreg_sem read or write lock */
static void mdev_device_remove_common(struct mdev_device *mdev)
{
@@ -99,145 +37,96 @@ static void mdev_device_remove_common(struct mdev_device *mdev)
static int mdev_device_remove_cb(struct device *dev, void *data)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
-
- if (mdev)
- mdev_device_remove_common(mdev);
+ if (dev->bus == &mdev_bus_type)
+ mdev_device_remove_common(to_mdev_device(dev));
return 0;
}
/*
- * mdev_register_device : Register a device
+ * mdev_register_parent: Register a device as parent for mdevs
+ * @parent: parent structure registered
* @dev: device structure representing parent device.
* @mdev_driver: Device driver to bind to the newly created mdev
+ * @types: Array of supported mdev types
+ * @nr_types: Number of entries in @types
+ *
+ * Registers the @parent structure as a parent for mdev types and thus mdev
+ * devices. The caller needs to hold a reference on @dev that must not be
+ * released until after the call to mdev_unregister_parent().
*
- * Add device to list of registered parent devices.
* Returns a negative value on error, otherwise 0.
*/
-int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver)
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types)
{
- int ret;
- struct mdev_parent *parent;
char *env_string = "MDEV_STATE=registered";
char *envp[] = { env_string, NULL };
+ int ret;
- /* check for mandatory ops */
- if (!mdev_driver->supported_type_groups)
- return -EINVAL;
-
- dev = get_device(dev);
- if (!dev)
- return -EINVAL;
-
- mutex_lock(&parent_list_lock);
-
- /* Check for duplicate */
- parent = __find_parent_device(dev);
- if (parent) {
- parent = NULL;
- ret = -EEXIST;
- goto add_dev_err;
- }
-
- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
- if (!parent) {
- ret = -ENOMEM;
- goto add_dev_err;
- }
-
- kref_init(&parent->ref);
+ memset(parent, 0, sizeof(*parent));
init_rwsem(&parent->unreg_sem);
-
parent->dev = dev;
parent->mdev_driver = mdev_driver;
+ parent->types = types;
+ parent->nr_types = nr_types;
+ atomic_set(&parent->available_instances, mdev_driver->max_instances);
if (!mdev_bus_compat_class) {
mdev_bus_compat_class = class_compat_register("mdev_bus");
- if (!mdev_bus_compat_class) {
- ret = -ENOMEM;
- goto add_dev_err;
- }
+ if (!mdev_bus_compat_class)
+ return -ENOMEM;
}
ret = parent_create_sysfs_files(parent);
if (ret)
- goto add_dev_err;
+ return ret;
ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL);
if (ret)
dev_warn(dev, "Failed to create compatibility class link\n");
- list_add(&parent->next, &parent_list);
- mutex_unlock(&parent_list_lock);
-
dev_info(dev, "MDEV: Registered\n");
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
-
return 0;
-
-add_dev_err:
- mutex_unlock(&parent_list_lock);
- if (parent)
- mdev_put_parent(parent);
- else
- put_device(dev);
- return ret;
}
-EXPORT_SYMBOL(mdev_register_device);
+EXPORT_SYMBOL(mdev_register_parent);
/*
- * mdev_unregister_device : Unregister a parent device
- * @dev: device structure representing parent device.
- *
- * Remove device from list of registered parent devices. Give a chance to free
- * existing mediated devices for given device.
+ * mdev_unregister_parent : Unregister a parent device
+ * @parent: parent structure to unregister
*/
-
-void mdev_unregister_device(struct device *dev)
+void mdev_unregister_parent(struct mdev_parent *parent)
{
- struct mdev_parent *parent;
char *env_string = "MDEV_STATE=unregistered";
char *envp[] = { env_string, NULL };
- mutex_lock(&parent_list_lock);
- parent = __find_parent_device(dev);
-
- if (!parent) {
- mutex_unlock(&parent_list_lock);
- return;
- }
- dev_info(dev, "MDEV: Unregistering\n");
-
- list_del(&parent->next);
- mutex_unlock(&parent_list_lock);
+ dev_info(parent->dev, "MDEV: Unregistering\n");
down_write(&parent->unreg_sem);
-
- class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
-
- device_for_each_child(dev, NULL, mdev_device_remove_cb);
-
+ class_compat_remove_link(mdev_bus_compat_class, parent->dev, NULL);
+ device_for_each_child(parent->dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
up_write(&parent->unreg_sem);
- mdev_put_parent(parent);
-
- /* We still have the caller's reference to use for the uevent */
- kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&parent->dev->kobj, KOBJ_CHANGE, envp);
}
-EXPORT_SYMBOL(mdev_unregister_device);
+EXPORT_SYMBOL(mdev_unregister_parent);
static void mdev_device_release(struct device *dev)
{
struct mdev_device *mdev = to_mdev_device(dev);
-
- /* Pairs with the get in mdev_device_create() */
- kobject_put(&mdev->type->kobj);
+ struct mdev_parent *parent = mdev->type->parent;
mutex_lock(&mdev_list_lock);
list_del(&mdev->next);
+ if (!parent->mdev_driver->get_available)
+ atomic_inc(&parent->available_instances);
mutex_unlock(&mdev_list_lock);
+ /* Pairs with the get in mdev_device_create() */
+ kobject_put(&mdev->type->kobj);
+
dev_dbg(&mdev->dev, "MDEV: destroying\n");
kfree(mdev);
}
@@ -259,6 +148,18 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
}
}
+ if (!drv->get_available) {
+ /*
+		 * Note that the non-atomic read and dec are fine here because
+		 * all modifications are under mdev_list_lock.
+ */
+ if (!atomic_read(&parent->available_instances)) {
+ mutex_unlock(&mdev_list_lock);
+ return -EUSERS;
+ }
+ atomic_dec(&parent->available_instances);
+ }
+
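The check-then-decrement above is safe without an atomic read-modify-write only because every writer holds mdev_list_lock. A minimal standalone sketch of the same pattern (all names here are hypothetical):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int available_instances = 4;

/* Returns true if an instance slot was claimed. */
static bool claim_instance(void)
{
	bool ok = false;

	pthread_mutex_lock(&lock);
	if (available_instances > 0) {  /* plain read: all writers hold the lock */
		available_instances--;  /* plain decrement, for the same reason */
		ok = true;
	}
	pthread_mutex_unlock(&lock);
	return ok;
}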
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
mutex_unlock(&mdev_list_lock);
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
index 9c2af59809e2..7825d83a55f8 100644
--- a/drivers/vfio/mdev/mdev_driver.c
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -7,7 +7,6 @@
* Kirti Wankhede <kwankhede@nvidia.com>
*/
-#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/mdev.h>
@@ -47,7 +46,6 @@ struct bus_type mdev_bus_type = {
.remove = mdev_remove,
.match = mdev_match,
};
-EXPORT_SYMBOL_GPL(mdev_bus_type);
/**
* mdev_register_driver - register a new MDEV driver
@@ -57,10 +55,11 @@ EXPORT_SYMBOL_GPL(mdev_bus_type);
**/
int mdev_register_driver(struct mdev_driver *drv)
{
+ if (!drv->device_api)
+ return -EINVAL;
+
/* initialize common driver fields */
drv->driver.bus = &mdev_bus_type;
-
- /* register with core */
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(mdev_register_driver);
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 7c9fc79f3d83..af457b27f607 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -13,25 +13,7 @@
int mdev_bus_register(void);
void mdev_bus_unregister(void);
-struct mdev_parent {
- struct device *dev;
- struct mdev_driver *mdev_driver;
- struct kref ref;
- struct list_head next;
- struct kset *mdev_types_kset;
- struct list_head type_list;
- /* Synchronize device creation/removal with parent unregistration */
- struct rw_semaphore unreg_sem;
-};
-
-struct mdev_type {
- struct kobject kobj;
- struct kobject *devices_kobj;
- struct mdev_parent *parent;
- struct list_head next;
- unsigned int type_group_id;
-};
-
+extern struct bus_type mdev_bus_type;
extern const struct attribute_group *mdev_device_groups[];
#define to_mdev_type_attr(_attr) \
@@ -48,16 +30,4 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev);
int mdev_device_create(struct mdev_type *kobj, const guid_t *uuid);
int mdev_device_remove(struct mdev_device *dev);
-void mdev_release_parent(struct kref *kref);
-
-static inline void mdev_get_parent(struct mdev_parent *parent)
-{
- kref_get(&parent->ref);
-}
-
-static inline void mdev_put_parent(struct mdev_parent *parent)
-{
- kref_put(&parent->ref, mdev_release_parent);
-}
-
#endif /* MDEV_PRIVATE_H */
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 0ccfeb3dda24..abe3359dd477 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -9,14 +9,24 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
-#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include "mdev_private.h"
-/* Static functions */
+struct mdev_type_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf);
+ ssize_t (*store)(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, const char *buf,
+ size_t count);
+};
+
+#define MDEV_TYPE_ATTR_RO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
+#define MDEV_TYPE_ATTR_WO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
static ssize_t mdev_type_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
@@ -74,152 +84,156 @@ static ssize_t create_store(struct mdev_type *mtype,
return count;
}
-
static MDEV_TYPE_ATTR_WO(create);
+static ssize_t device_api_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", mtype->parent->mdev_driver->device_api);
+}
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static ssize_t name_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n",
+ mtype->pretty_name ? mtype->pretty_name : mtype->sysfs_name);
+}
+
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t available_instances_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ struct mdev_driver *drv = mtype->parent->mdev_driver;
+
+ if (drv->get_available)
+ return sysfs_emit(buf, "%u\n", drv->get_available(mtype));
+ return sysfs_emit(buf, "%u\n",
+ atomic_read(&mtype->parent->available_instances));
+}
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t description_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ return mtype->parent->mdev_driver->show_description(mtype, buf);
+}
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *mdev_types_core_attrs[] = {
+ &mdev_type_attr_create.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_name.attr,
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static umode_t mdev_types_core_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (attr == &mdev_type_attr_description.attr &&
+ !to_mdev_type(kobj)->parent->mdev_driver->show_description)
+ return 0;
+ return attr->mode;
+}
+
+static struct attribute_group mdev_type_core_group = {
+ .attrs = mdev_types_core_attrs,
+ .is_visible = mdev_types_core_is_visible,
+};
+
+static const struct attribute_group *mdev_type_groups[] = {
+ &mdev_type_core_group,
+ NULL,
+};
+
static void mdev_type_release(struct kobject *kobj)
{
struct mdev_type *type = to_mdev_type(kobj);
pr_debug("Releasing group %s\n", kobj->name);
/* Pairs with the get in add_mdev_supported_type() */
- mdev_put_parent(type->parent);
- kfree(type);
+ put_device(type->parent->dev);
}
static struct kobj_type mdev_type_ktype = {
- .sysfs_ops = &mdev_type_sysfs_ops,
- .release = mdev_type_release,
+ .sysfs_ops = &mdev_type_sysfs_ops,
+ .release = mdev_type_release,
+ .default_groups = mdev_type_groups,
};
-static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
- unsigned int type_group_id)
+static int mdev_type_add(struct mdev_parent *parent, struct mdev_type *type)
{
- struct mdev_type *type;
- struct attribute_group *group =
- parent->mdev_driver->supported_type_groups[type_group_id];
int ret;
- if (!group->name) {
- pr_err("%s: Type name empty!\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- type = kzalloc(sizeof(*type), GFP_KERNEL);
- if (!type)
- return ERR_PTR(-ENOMEM);
-
type->kobj.kset = parent->mdev_types_kset;
type->parent = parent;
/* Pairs with the put in mdev_type_release() */
- mdev_get_parent(parent);
- type->type_group_id = type_group_id;
+ get_device(parent->dev);
ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
"%s-%s", dev_driver_string(parent->dev),
- group->name);
+ type->sysfs_name);
if (ret) {
kobject_put(&type->kobj);
- return ERR_PTR(ret);
+ return ret;
}
- ret = sysfs_create_file(&type->kobj, &mdev_type_attr_create.attr);
- if (ret)
- goto attr_create_failed;
-
type->devices_kobj = kobject_create_and_add("devices", &type->kobj);
if (!type->devices_kobj) {
ret = -ENOMEM;
goto attr_devices_failed;
}
- ret = sysfs_create_files(&type->kobj,
- (const struct attribute **)group->attrs);
- if (ret) {
- ret = -ENOMEM;
- goto attrs_failed;
- }
- return type;
+ return 0;
-attrs_failed:
- kobject_put(type->devices_kobj);
attr_devices_failed:
- sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
-attr_create_failed:
kobject_del(&type->kobj);
kobject_put(&type->kobj);
- return ERR_PTR(ret);
+ return ret;
}
-static void remove_mdev_supported_type(struct mdev_type *type)
+static void mdev_type_remove(struct mdev_type *type)
{
- struct attribute_group *group =
- type->parent->mdev_driver->supported_type_groups[type->type_group_id];
-
- sysfs_remove_files(&type->kobj,
- (const struct attribute **)group->attrs);
kobject_put(type->devices_kobj);
- sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
kobject_del(&type->kobj);
kobject_put(&type->kobj);
}
-static int add_mdev_supported_type_groups(struct mdev_parent *parent)
-{
- int i;
-
- for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) {
- struct mdev_type *type;
-
- type = add_mdev_supported_type(parent, i);
- if (IS_ERR(type)) {
- struct mdev_type *ltype, *tmp;
-
- list_for_each_entry_safe(ltype, tmp, &parent->type_list,
- next) {
- list_del(&ltype->next);
- remove_mdev_supported_type(ltype);
- }
- return PTR_ERR(type);
- }
- list_add(&type->next, &parent->type_list);
- }
- return 0;
-}
-
/* mdev sysfs functions */
void parent_remove_sysfs_files(struct mdev_parent *parent)
{
- struct mdev_type *type, *tmp;
-
- list_for_each_entry_safe(type, tmp, &parent->type_list, next) {
- list_del(&type->next);
- remove_mdev_supported_type(type);
- }
+ int i;
+ for (i = 0; i < parent->nr_types; i++)
+ mdev_type_remove(parent->types[i]);
kset_unregister(parent->mdev_types_kset);
}
int parent_create_sysfs_files(struct mdev_parent *parent)
{
- int ret;
+ int ret, i;
parent->mdev_types_kset = kset_create_and_add("mdev_supported_types",
NULL, &parent->dev->kobj);
-
if (!parent->mdev_types_kset)
return -ENOMEM;
- INIT_LIST_HEAD(&parent->type_list);
-
- ret = add_mdev_supported_type_groups(parent);
- if (ret)
- goto create_err;
+ for (i = 0; i < parent->nr_types; i++) {
+ ret = mdev_type_add(parent, parent->types[i]);
+ if (ret)
+ goto out_err;
+ }
return 0;
-create_err:
- kset_unregister(parent->mdev_types_kset);
- return ret;
+out_err:
+ while (--i >= 0)
+ mdev_type_remove(parent->types[i]);
+	kset_unregister(parent->mdev_types_kset);
+	return ret;
}
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index ea762e28c1cc..39eeca18a0f7 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -16,7 +16,7 @@
#include "hisi_acc_vfio_pci.h"
-/* return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
+/* Return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
u32 val;
@@ -189,7 +189,7 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
struct device *dev = &qm->pdev->dev;
int ret;
- /* check VF state */
+ /* Check VF state */
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
return -EBUSY;
@@ -337,16 +337,7 @@ static int vf_qm_cache_wb(struct hisi_qm *qm)
return 0;
}
-static struct hisi_acc_vf_core_device *hssi_acc_drvdata(struct pci_dev *pdev)
-{
- struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
-
- return container_of(core_device, struct hisi_acc_vf_core_device,
- core_device);
-}
-
-static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev,
- struct hisi_qm *qm)
+static void vf_qm_fun_reset(struct hisi_qm *qm)
{
int i;
@@ -382,7 +373,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return -EINVAL;
}
- /* vf qp num check */
+ /* VF qp num check */
ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
if (ret <= 0) {
dev_err(dev, "failed to get vft qp nums\n");
@@ -396,7 +387,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
vf_qm->qp_num = ret;
- /* vf isolation state check */
+ /* VF isolation state check */
ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
if (ret) {
dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
@@ -405,7 +396,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
if (vf_data->que_iso_cfg != que_iso_state) {
dev_err(dev, "failed to match isolation state\n");
- return ret;
+ return -EINVAL;
}
ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
@@ -427,10 +418,10 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
int ret;
vf_data->acc_magic = ACC_DEV_MAGIC;
- /* save device id */
+ /* Save device id */
vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
- /* vf qp num save from PF */
+ /* VF qp num save from PF */
ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
if (ret <= 0) {
dev_err(dev, "failed to get vft qp nums!\n");
@@ -474,19 +465,19 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
ret = qm_set_regs(qm, vf_data);
if (ret) {
- dev_err(dev, "Set VF regs failed\n");
+ dev_err(dev, "set VF regs failed\n");
return ret;
}
ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
if (ret) {
- dev_err(dev, "Set sqc failed\n");
+ dev_err(dev, "set sqc failed\n");
return ret;
}
ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
if (ret) {
- dev_err(dev, "Set cqc failed\n");
+ dev_err(dev, "set cqc failed\n");
return ret;
}
@@ -528,12 +519,12 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return -EINVAL;
/* Every reg is 32 bit, the dma address is 64 bit. */
- vf_data->eqe_dma = vf_data->qm_eqc_dw[2];
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->eqe_dma |= vf_data->qm_eqc_dw[1];
- vf_data->aeqe_dma = vf_data->qm_aeqc_dw[2];
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
/* Through SQC_BT/CQC_BT to get sqc and cqc address */
ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
@@ -552,6 +543,14 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return 0;
}
+static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ return container_of(core_device, struct hisi_acc_vf_core_device,
+ core_device);
+}
+
/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
@@ -662,7 +661,10 @@ static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vd
if (hisi_acc_vdev->vf_qm_state != QM_READY)
return;
- vf_qm_fun_reset(hisi_acc_vdev, vf_qm);
+ /* Make sure the device is enabled */
+ qm_dev_cmd_init(vf_qm);
+
+ vf_qm_fun_reset(vf_qm);
}
static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
@@ -970,7 +972,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
- struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
if (hisi_acc_vdev->core_device.vdev.migration_flags !=
VFIO_MIGRATION_STOP_COPY)
@@ -1213,8 +1215,28 @@ static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
.migration_get_state = hisi_acc_vfio_pci_get_device_state,
};
+static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
+ struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);
+
+ hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
+ hisi_acc_vdev->pf_qm = pf_qm;
+ hisi_acc_vdev->vf_dev = pdev;
+ mutex_init(&hisi_acc_vdev->state_mutex);
+
+ core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY;
+ core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;
+
+ return vfio_pci_core_init_dev(core_vdev);
+}
+
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.name = "hisi-acc-vfio-pci-migration",
+ .init = hisi_acc_vfio_pci_migrn_init_dev,
+ .release = vfio_pci_core_release_dev,
.open_device = hisi_acc_vfio_pci_open_device,
.close_device = hisi_acc_vfio_pci_close_device,
.ioctl = hisi_acc_vfio_pci_ioctl,
@@ -1228,6 +1250,8 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
.name = "hisi-acc-vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
.open_device = hisi_acc_vfio_pci_open_device,
.close_device = vfio_pci_core_close_device,
.ioctl = vfio_pci_core_ioctl,
@@ -1239,73 +1263,45 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
.match = vfio_pci_core_match,
};
-static int
-hisi_acc_vfio_pci_migrn_init(struct hisi_acc_vf_core_device *hisi_acc_vdev,
- struct pci_dev *pdev, struct hisi_qm *pf_qm)
-{
- int vf_id;
-
- vf_id = pci_iov_vf_id(pdev);
- if (vf_id < 0)
- return vf_id;
-
- hisi_acc_vdev->vf_id = vf_id + 1;
- hisi_acc_vdev->core_device.vdev.migration_flags =
- VFIO_MIGRATION_STOP_COPY;
- hisi_acc_vdev->pf_qm = pf_qm;
- hisi_acc_vdev->vf_dev = pdev;
- mutex_init(&hisi_acc_vdev->state_mutex);
-
- return 0;
-}
-
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_acc_vf_core_device *hisi_acc_vdev;
+ const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
struct hisi_qm *pf_qm;
+ int vf_id;
int ret;
- hisi_acc_vdev = kzalloc(sizeof(*hisi_acc_vdev), GFP_KERNEL);
- if (!hisi_acc_vdev)
- return -ENOMEM;
-
pf_qm = hisi_acc_get_pf_qm(pdev);
if (pf_qm && pf_qm->ver >= QM_HW_V3) {
- ret = hisi_acc_vfio_pci_migrn_init(hisi_acc_vdev, pdev, pf_qm);
- if (!ret) {
- vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
- &hisi_acc_vfio_pci_migrn_ops);
- hisi_acc_vdev->core_device.vdev.mig_ops =
- &hisi_acc_vfio_pci_migrn_state_ops;
- } else {
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id >= 0)
+ ops = &hisi_acc_vfio_pci_migrn_ops;
+ else
pci_warn(pdev, "migration support failed, continue with generic interface\n");
- vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
- &hisi_acc_vfio_pci_ops);
- }
- } else {
- vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
- &hisi_acc_vfio_pci_ops);
}
+ hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
+ core_device.vdev, &pdev->dev, ops);
+ if (IS_ERR(hisi_acc_vdev))
+ return PTR_ERR(hisi_acc_vdev);
+
dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
if (ret)
- goto out_free;
+ goto out_put_vdev;
return 0;
-out_free:
- vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
- kfree(hisi_acc_vdev);
+out_put_vdev:
+ vfio_put_device(&hisi_acc_vdev->core_device.vdev);
return ret;
}
static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
- struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
- vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
- kfree(hisi_acc_vdev);
+ vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}
static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index 5494f4983bbe..67343325b320 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -16,7 +16,6 @@
#define SEC_CORE_INT_STATUS 0x301008
#define HPRE_HAC_INT_STATUS 0x301800
#define HZIP_CORE_INT_STATUS 0x3010AC
-#define QM_QUE_ISO_CFG 0x301154
#define QM_VFT_CFG_RDY 0x10006c
#define QM_VFT_CFG_OP_WR 0x100058
@@ -80,7 +79,7 @@ struct acc_vf_data {
/* QM reserved 5 regs */
u32 qm_rsv_regs[5];
u32 padding;
- /* qm memory init information */
+ /* QM memory init information */
u64 eqe_dma;
u64 aeqe_dma;
u64 sqc_dma;
@@ -99,7 +98,7 @@ struct hisi_acc_vf_migration_file {
struct hisi_acc_vf_core_device {
struct vfio_pci_core_device core_device;
u8 deferred_reset:1;
- /* for migration state */
+ /* For migration state */
struct mutex state_mutex;
enum vfio_device_mig_state mig_state;
struct pci_dev *pf_dev;
@@ -108,7 +107,7 @@ struct hisi_acc_vf_core_device {
struct hisi_qm vf_qm;
u32 vf_qm_state;
int vf_id;
- /* for reset handler */
+ /* For reset handler */
spinlock_t reset_lock;
struct hisi_acc_vf_migration_file *resuming_migf;
struct hisi_acc_vf_migration_file *saving_migf;
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index dd5d7bfe0a49..c604b70437a5 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -5,8 +5,12 @@
#include "cmd.h"
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
u16 *vhca_id);
+static void
+_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
@@ -66,25 +70,35 @@ int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
return 0;
}
+static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
+{
+ /* Mark the tracker under an error and wake it up if it's running */
+ mvdev->tracker.is_err = true;
+ complete(&mvdev->tracker_comp);
+}
+
static int mlx5fv_vf_event(struct notifier_block *nb,
unsigned long event, void *data)
{
struct mlx5vf_pci_core_device *mvdev =
container_of(nb, struct mlx5vf_pci_core_device, nb);
- mutex_lock(&mvdev->state_mutex);
switch (event) {
case MLX5_PF_NOTIFY_ENABLE_VF:
+ mutex_lock(&mvdev->state_mutex);
mvdev->mdev_detach = false;
+ mlx5vf_state_mutex_unlock(mvdev);
break;
case MLX5_PF_NOTIFY_DISABLE_VF:
- mlx5vf_disable_fds(mvdev);
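+ /*
+ * mlx5vf_cmd_close_migratable() takes state_mutex itself, so it
+ * must be called before the lock is taken below.
+ */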
+ mlx5vf_cmd_close_migratable(mvdev);
+ mutex_lock(&mvdev->state_mutex);
mvdev->mdev_detach = true;
+ mlx5vf_state_mutex_unlock(mvdev);
break;
default:
break;
}
- mlx5vf_state_mutex_unlock(mvdev);
+
return 0;
}
@@ -93,8 +107,11 @@ void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
if (!mvdev->migrate_cap)
return;
+ /*
+ * Must be done outside state_mutex so a tracker blocked in
+ * mlx5vf_tracker_read_and_clear() can wake up and release the lock.
+ */
+ set_tracker_error(mvdev);
mutex_lock(&mvdev->state_mutex);
mlx5vf_disable_fds(mvdev);
+ _mlx5vf_free_page_tracker_resources(mvdev);
mlx5vf_state_mutex_unlock(mvdev);
}
@@ -109,7 +126,8 @@ void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
}
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
- const struct vfio_migration_ops *mig_ops)
+ const struct vfio_migration_ops *mig_ops,
+ const struct vfio_log_ops *log_ops)
{
struct pci_dev *pdev = mvdev->core_device.pdev;
int ret;
@@ -151,6 +169,9 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
VFIO_MIGRATION_STOP_COPY |
VFIO_MIGRATION_P2P;
mvdev->core_device.vdev.mig_ops = mig_ops;
+ init_completion(&mvdev->tracker_comp);
+ if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
+ mvdev->core_device.vdev.log_ops = log_ops;
end:
mlx5_vf_put_core_dev(mvdev->mdev);
@@ -188,11 +209,13 @@ err_exec:
return ret;
}
-static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_vf_migration_file *migf, u32 *mkey)
+static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_vf_migration_file *migf,
+ struct mlx5_vhca_recv_buf *recv_buf,
+ u32 *mkey)
{
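+ /*
+ * The mkey covers either a migration file's scatter list (migf) or
+ * the tracker's receive buffer pages (recv_buf); exactly one is set.
+ */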
- size_t npages = DIV_ROUND_UP(migf->total_length, PAGE_SIZE);
- struct sg_dma_page_iter dma_iter;
+ size_t npages = migf ? DIV_ROUND_UP(migf->total_length, PAGE_SIZE) :
+ recv_buf->npages;
int err = 0, inlen;
__be64 *mtt;
void *mkc;
@@ -209,8 +232,17 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
DIV_ROUND_UP(npages, 2));
mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
- *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
+ if (migf) {
+ struct sg_dma_page_iter dma_iter;
+
+ for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
+ *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
+ } else {
+ int i;
+
+ for (i = 0; i < npages; i++)
+ *mtt++ = cpu_to_be64(recv_buf->dma_addrs[i]);
+ }
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
@@ -223,7 +255,8 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
- MLX5_SET64(mkc, mkc, len, migf->total_length);
+ MLX5_SET64(mkc, mkc, len,
+ migf ? migf->total_length : (npages * PAGE_SIZE));
err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
kvfree(in);
return err;
@@ -297,7 +330,7 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
if (err)
goto err_dma_map;
- err = _create_state_mkey(mdev, pdn, migf, &mkey);
+ err = _create_mkey(mdev, pdn, migf, NULL, &mkey);
if (err)
goto err_create_mkey;
@@ -369,7 +402,7 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
if (err)
goto err_reg;
- err = _create_state_mkey(mdev, pdn, migf, &mkey);
+ err = _create_mkey(mdev, pdn, migf, NULL, &mkey);
if (err)
goto err_mkey;
@@ -391,3 +424,939 @@ end:
mutex_unlock(&migf->lock);
return err;
}
+
+static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes)
+{
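+ /*
+ * The device caps how many ranges it can track, so greedily merge
+ * the two closest ranges until few enough remain.
+ */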
+ struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
+ unsigned long min_gap;
+ unsigned long curr_gap;
+
+ /* Special shortcut when a single range is required */
+ if (req_nodes == 1) {
+ unsigned long last;
+
+ curr = comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ last = curr->last;
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ if (prev != comb_start)
+ interval_tree_remove(prev, root);
+ }
+ comb_start->last = last;
+ return;
+ }
+
+ /* Combine ranges which have the smallest gap */
+ while (cur_nodes > req_nodes) {
+ prev = NULL;
+ min_gap = ULONG_MAX;
+ curr = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ if (prev) {
+ curr_gap = curr->start - prev->last;
+ if (curr_gap < min_gap) {
+ min_gap = curr_gap;
+ comb_start = prev;
+ comb_end = curr;
+ }
+ }
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ }
+ comb_start->last = comb_end->last;
+ interval_tree_remove(comb_end, root);
+ cur_nodes--;
+ }
+}
+
+static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
+ struct mlx5vf_pci_core_device *mvdev,
+ struct rb_root_cached *ranges, u32 nnodes)
+{
+ int max_num_range =
+ MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_max_num_range);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ int record_size = MLX5_ST_SZ_BYTES(page_track_range);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ struct interval_tree_node *node = NULL;
+ u64 total_ranges_len = 0;
+ u32 num_ranges = nnodes;
+ u8 log_addr_space_size;
+ void *range_list_ptr;
+ void *obj_context;
+ void *cmd_hdr;
+ int inlen;
+ void *in;
+ int err;
+ int i;
+
+ if (num_ranges > max_num_range) {
+ combine_ranges(ranges, nnodes, max_num_range);
+ num_ranges = max_num_range;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_page_track_obj_in) +
+ record_size * num_ranges;
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(create_page_track_obj_in, in,
+ general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type,
+ MLX5_OBJ_TYPE_PAGE_TRACK);
+ obj_context = MLX5_ADDR_OF(create_page_track_obj_in, in, obj_context);
+ MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id);
+ MLX5_SET(page_track, obj_context, track_type, 1);
+ MLX5_SET(page_track, obj_context, log_page_size,
+ ilog2(tracker->host_qp->tracked_page_size));
+ MLX5_SET(page_track, obj_context, log_msg_size,
+ ilog2(tracker->host_qp->max_msg_size));
+ MLX5_SET(page_track, obj_context, reporting_qpn, tracker->fw_qp->qpn);
+ MLX5_SET(page_track, obj_context, num_ranges, num_ranges);
+
+ range_list_ptr = MLX5_ADDR_OF(page_track, obj_context, track_range);
+ node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
+ for (i = 0; i < num_ranges; i++) {
+ void *addr_range_i_base = range_list_ptr + record_size * i;
+ unsigned long length = node->last - node->start;
+
+ MLX5_SET64(page_track_range, addr_range_i_base, start_address,
+ node->start);
+ MLX5_SET64(page_track_range, addr_range_i_base, length, length);
+ total_ranges_len += length;
+ node = interval_tree_iter_next(node, 0, ULONG_MAX);
+ }
+
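+ /* The loop above must have consumed every range */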
+ WARN_ON(node);
+ log_addr_space_size = ilog2(total_ranges_len);
+ if (log_addr_space_size <
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_min_addr_space)) ||
+ log_addr_space_size >
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_addr_space))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ MLX5_SET(page_track, obj_context, log_addr_space_size,
+ log_addr_space_size);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ tracker->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ kfree(in);
+ return err;
+}
+
+static int mlx5vf_cmd_destroy_tracker(struct mlx5_core_dev *mdev,
+ u32 tracker_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, tracker_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5vf_cmd_modify_tracker(struct mlx5_core_dev *mdev,
+ u32 tracker_id, unsigned long iova,
+ unsigned long length, u32 tracker_state)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_page_track_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+
+ cmd_hdr = MLX5_ADDR_OF(modify_page_track_obj_in, in, general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, tracker_id);
+
+ obj_context = MLX5_ADDR_OF(modify_page_track_obj_in, in, obj_context);
+ MLX5_SET64(page_track, obj_context, modify_field_select, 0x3);
+ MLX5_SET64(page_track, obj_context, range_start_address, iova);
+ MLX5_SET64(page_track, obj_context, length, length);
+ MLX5_SET(page_track, obj_context, state, tracker_state);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_cq_buf *buf, int nent,
+ int cqe_size)
+{
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+ u8 log_wq_sz = ilog2(cqe_size);
+ int err;
+
+ err = mlx5_frag_buf_alloc_node(mdev, nent * cqe_size, frag_buf,
+ mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+ buf->cqe_size = cqe_size;
+ buf->nent = nent;
+ return 0;
+}
+
+static void init_cq_frag_buf(struct mlx5_vhca_cq_buf *buf)
+{
+ struct mlx5_cqe64 *cqe64;
+ void *cqe;
+ int i;
+
+ for (i = 0; i < buf->nent; i++) {
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+}
+
+static void mlx5vf_destroy_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_cq *cq)
+{
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+ mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
+ mlx5_db_free(mdev, &cq->db);
+}
+
+static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
+{
+ if (type != MLX5_EVENT_TYPE_CQ_ERROR)
+ return;
+
+ set_tracker_error(container_of(mcq, struct mlx5vf_pci_core_device,
+ tracker.cq.mcq));
+}
+
+static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
+ void *data)
+{
+ struct mlx5_vhca_page_tracker *tracker =
+ mlx5_nb_cof(nb, struct mlx5_vhca_page_tracker, nb);
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ tracker, struct mlx5vf_pci_core_device, tracker);
+ struct mlx5_eqe *eqe = data;
+ u8 event_type = (u8)type;
+ u8 queue_type;
+ int qp_num;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ queue_type = eqe->data.qp_srq.type;
+ if (queue_type != MLX5_EVENT_QUEUE_TYPE_QP)
+ break;
+ qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ if (qp_num != tracker->host_qp->qpn &&
+ qp_num != tracker->fw_qp->qpn)
+ break;
+ set_tracker_error(mvdev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ struct mlx5vf_pci_core_device *mvdev =
+ container_of(mcq, struct mlx5vf_pci_core_device,
+ tracker.cq.mcq);
+
+ complete(&mvdev->tracker_comp);
+}
+
+static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_page_tracker *tracker,
+ size_t ncqe)
+{
+ int cqe_size = cache_line_size() == 128 ? 128 : 64;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_vhca_cq *cq;
+ int inlen, err, eqn;
+ void *cqc, *in;
+ __be64 *pas;
+ int vector;
+
+ cq = &tracker->cq;
+ ncqe = roundup_pow_of_two(ncqe);
+ err = mlx5_db_alloc_node(mdev, &cq->db, mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ cq->ncqe = ncqe;
+ cq->mcq.set_ci_db = cq->db.db;
+ cq->mcq.arm_db = cq->db.db + 1;
+ cq->mcq.cqe_sz = cqe_size;
+ err = alloc_cq_frag_buf(mdev, &cq->buf, ncqe, cqe_size);
+ if (err)
+ goto err_db_free;
+
+ init_cq_frag_buf(&cq->buf);
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+ cq->buf.frag_buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_buff;
+ }
+
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ err = mlx5_vector2eqn(mdev, vector, &eqn);
+ if (err)
+ goto err_vec;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, tracker->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->buf.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
+ cq->mcq.comp = mlx5vf_cq_complete;
+ cq->mcq.event = mlx5vf_cq_event;
+ err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
+ if (err)
+ goto err_vec;
+
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
+ cq->mcq.cons_index);
+ kvfree(in);
+ return 0;
+
+err_vec:
+ kvfree(in);
+err_buff:
+ mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
+err_db_free:
+ mlx5_db_free(mdev, &cq->db);
+ return err;
+}
+
+static struct mlx5_vhca_qp *
+mlx5vf_create_rc_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_page_tracker *tracker, u32 max_recv_wr)
+{
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct mlx5_vhca_qp *qp;
+ u8 log_rq_stride;
+ u8 log_rq_sz;
+ void *qpc;
+ int inlen;
+ void *in;
+ int err;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->rq.wqe_cnt = roundup_pow_of_two(max_recv_wr);
+ log_rq_stride = ilog2(MLX5_SEND_WQE_DS);
+ log_rq_sz = ilog2(qp->rq.wqe_cnt);
+ err = mlx5_db_alloc_node(mdev, &qp->db, mdev->priv.numa_node);
+ if (err)
+ goto err_free;
+
+ if (max_recv_wr) {
+ err = mlx5_frag_buf_alloc_node(mdev,
+ wq_get_byte_sz(log_rq_sz, log_rq_stride),
+ &qp->buf, mdev->priv.numa_node);
+ if (err)
+ goto err_db_free;
+ mlx5_init_fbc(qp->buf.frags, log_rq_stride, log_rq_sz, &qp->rq.fbc);
+ }
+
+ qp->rq.db = &qp->db.db[MLX5_RCV_DBR];
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+ qp->buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, tracker->pdn);
+ MLX5_SET(qpc, qpc, uar_page, tracker->uar->index);
+ MLX5_SET(qpc, qpc, log_page_size,
+ qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
+ if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+ MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ if (max_recv_wr) {
+ MLX5_SET(qpc, qpc, cqn_rcv, tracker->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, log_rq_stride, log_rq_stride - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, log_rq_sz);
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+ mlx5_fill_page_frag_array(&qp->buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in,
+ in, pas));
+ } else {
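+ /* No receive WRs: the FW-side QP uses a zero-length RQ */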
+ MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+ }
+
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ kvfree(in);
+ if (err)
+ goto err_in;
+
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
+ return qp;
+
+err_in:
+ if (max_recv_wr)
+ mlx5_frag_buf_free(mdev, &qp->buf);
+err_db_free:
+ mlx5_db_free(mdev, &qp->db);
+err_free:
+ kfree(qp);
+ return ERR_PTR(err);
+}
+
+static void mlx5vf_post_recv(struct mlx5_vhca_qp *qp)
+{
+ struct mlx5_wqe_data_seg *data;
+ unsigned int ix;
+
+ WARN_ON(qp->rq.pc - qp->rq.cc >= qp->rq.wqe_cnt);
+ ix = qp->rq.pc & (qp->rq.wqe_cnt - 1);
+ data = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ix);
+ data->byte_count = cpu_to_be32(qp->max_msg_size);
+ data->lkey = cpu_to_be32(qp->recv_buf.mkey);
+ data->addr = cpu_to_be64(qp->recv_buf.next_rq_offset);
+ qp->rq.pc++;
+ /* Make sure that descriptors are written before doorbell record. */
+ dma_wmb();
+ *qp->rq.db = cpu_to_be32(qp->rq.pc & 0xffff);
+}
+
+static int mlx5vf_activate_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp, u32 remote_qpn,
+ bool host_qp)
+{
+ u32 init_in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ u32 rtr_in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+ u32 rts_in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+ void *qpc;
+ int ret;
+
+ /* Init */
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, init_in, qpc);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+ MLX5_SET(qpc, qpc, rre, 1);
+ MLX5_SET(qpc, qpc, rwe, 1);
+ MLX5_SET(rst2init_qp_in, init_in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, init_in, qpn, qp->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rst2init_qp, init_in);
+ if (ret)
+ return ret;
+
+ if (host_qp) {
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+ int i;
+
+ for (i = 0; i < qp->rq.wqe_cnt; i++) {
+ mlx5vf_post_recv(qp);
+ recv_buf->next_rq_offset += qp->max_msg_size;
+ }
+ }
+
+ /* RTR */
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, rtr_in, qpc);
+ MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn);
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, MLX5_CAP_GEN(mdev, log_max_msg));
+ MLX5_SET(qpc, qpc, remote_qpn, remote_qpn);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 1);
+ MLX5_SET(init2rtr_qp_in, rtr_in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn);
+ ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, rtr_in);
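+ /* The host QP only receives, so it stays in RTR */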
+ if (ret || host_qp)
+ return ret;
+
+ /* RTS */
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, rts_in, qpc);
+ MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn);
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
+ MLX5_SET(rtr2rts_qp_in, rts_in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, rts_in);
+}
+
+static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+
+ mlx5_frag_buf_free(mdev, &qp->buf);
+ mlx5_db_free(mdev, &qp->db);
+ kfree(qp);
+}
+
+static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i;
+
+ /* Undo alloc_pages_bulk_array() */
+ for (i = 0; i < recv_buf->npages; i++)
+ __free_page(recv_buf->page_list[i]);
+
+ kvfree(recv_buf->page_list);
+}
+
+static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
+ unsigned int npages)
+{
+ unsigned int filled = 0, done = 0;
+ int i;
+
+ recv_buf->page_list = kvcalloc(npages, sizeof(*recv_buf->page_list),
+ GFP_KERNEL);
+ if (!recv_buf->page_list)
+ return -ENOMEM;
+
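+ /* alloc_pages_bulk_array() may return fewer pages than requested */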
+ for (;;) {
+ filled = alloc_pages_bulk_array(GFP_KERNEL, npages - done,
+ recv_buf->page_list + done);
+ if (!filled)
+ goto err;
+
+ done += filled;
+ if (done == npages)
+ break;
+ }
+
+ recv_buf->npages = npages;
+ return 0;
+
+err:
+ for (i = 0; i < npages; i++) {
+ if (recv_buf->page_list[i])
+ __free_page(recv_buf->page_list[i]);
+ }
+
+ kvfree(recv_buf->page_list);
+ return -ENOMEM;
+}
+
+static int register_dma_recv_pages(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i, j;
+
+ recv_buf->dma_addrs = kvcalloc(recv_buf->npages,
+ sizeof(*recv_buf->dma_addrs),
+ GFP_KERNEL);
+ if (!recv_buf->dma_addrs)
+ return -ENOMEM;
+
+ for (i = 0; i < recv_buf->npages; i++) {
+ recv_buf->dma_addrs[i] = dma_map_page(mdev->device,
+ recv_buf->page_list[i],
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mdev->device, recv_buf->dma_addrs[i]))
+ goto error;
+ }
+ return 0;
+
+error:
+ for (j = 0; j < i; j++)
+ dma_unmap_single(mdev->device, recv_buf->dma_addrs[j],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ kvfree(recv_buf->dma_addrs);
+ return -ENOMEM;
+}
+
+static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i;
+
+ for (i = 0; i < recv_buf->npages; i++)
+ dma_unmap_single(mdev->device, recv_buf->dma_addrs[i],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ kvfree(recv_buf->dma_addrs);
+}
+
+static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp)
+{
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+
+ mlx5_core_destroy_mkey(mdev, recv_buf->mkey);
+ unregister_dma_recv_pages(mdev, recv_buf);
+ free_recv_pages(&qp->recv_buf);
+}
+
+static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp, u32 pdn,
+ u64 rq_size)
+{
+ unsigned int npages = DIV_ROUND_UP_ULL(rq_size, PAGE_SIZE);
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+ int err;
+
+ err = alloc_recv_pages(recv_buf, npages);
+ if (err < 0)
+ return err;
+
+ err = register_dma_recv_pages(mdev, recv_buf);
+ if (err)
+ goto end;
+
+ err = _create_mkey(mdev, pdn, NULL, recv_buf, &recv_buf->mkey);
+ if (err)
+ goto err_create_mkey;
+
+ return 0;
+
+err_create_mkey:
+ unregister_dma_recv_pages(mdev, recv_buf);
+end:
+ free_recv_pages(recv_buf);
+ return err;
+}
+
+static void
+_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ struct mlx5_core_dev *mdev = mvdev->mdev;
+
+ lockdep_assert_held(&mvdev->state_mutex);
+
+ if (!mvdev->log_active)
+ return;
+
+ WARN_ON(mvdev->mdev_detach);
+
+ mlx5_eq_notifier_unregister(mdev, &tracker->nb);
+ mlx5vf_cmd_destroy_tracker(mdev, tracker->id);
+ mlx5vf_destroy_qp(mdev, tracker->fw_qp);
+ mlx5vf_free_qp_recv_resources(mdev, tracker->host_qp);
+ mlx5vf_destroy_qp(mdev, tracker->host_qp);
+ mlx5vf_destroy_cq(mdev, &tracker->cq);
+ mlx5_core_dealloc_pd(mdev, tracker->pdn);
+ mlx5_put_uars_page(mdev, tracker->uar);
+ mvdev->log_active = false;
+}
+
+int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mutex_lock(&mvdev->state_mutex);
+ if (!mvdev->log_active)
+ goto end;
+
+ _mlx5vf_free_page_tracker_resources(mvdev);
+ mvdev->log_active = false;
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+}
+
+int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes,
+ u64 *page_size)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ u8 log_tracked_page = ilog2(*page_size);
+ struct mlx5_vhca_qp *host_qp;
+ struct mlx5_vhca_qp *fw_qp;
+ struct mlx5_core_dev *mdev;
+ u32 max_msg_size = PAGE_SIZE;
+ u64 rq_size = SZ_2M;
+ u32 max_recv_wr;
+ int err;
+
+ mutex_lock(&mvdev->state_mutex);
+ if (mvdev->mdev_detach) {
+ err = -ENOTCONN;
+ goto end;
+ }
+
+ if (mvdev->log_active) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ mdev = mvdev->mdev;
+ memset(tracker, 0, sizeof(*tracker));
+ tracker->uar = mlx5_get_uars_page(mdev);
+ if (IS_ERR(tracker->uar)) {
+ err = PTR_ERR(tracker->uar);
+ goto end;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &tracker->pdn);
+ if (err)
+ goto err_uar;
+
+ max_recv_wr = DIV_ROUND_UP_ULL(rq_size, max_msg_size);
+ err = mlx5vf_create_cq(mdev, tracker, max_recv_wr);
+ if (err)
+ goto err_dealloc_pd;
+
+ host_qp = mlx5vf_create_rc_qp(mdev, tracker, max_recv_wr);
+ if (IS_ERR(host_qp)) {
+ err = PTR_ERR(host_qp);
+ goto err_cq;
+ }
+
+ host_qp->max_msg_size = max_msg_size;
+ if (log_tracked_page < MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_min_page_size)) {
+ log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_min_page_size);
+ } else if (log_tracked_page > MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_max_page_size)) {
+ log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_max_page_size);
+ }
+
+ host_qp->tracked_page_size = (1ULL << log_tracked_page);
+ err = mlx5vf_alloc_qp_recv_resources(mdev, host_qp, tracker->pdn,
+ rq_size);
+ if (err)
+ goto err_host_qp;
+
+ fw_qp = mlx5vf_create_rc_qp(mdev, tracker, 0);
+ if (IS_ERR(fw_qp)) {
+ err = PTR_ERR(fw_qp);
+ goto err_recv_resources;
+ }
+
+ err = mlx5vf_activate_qp(mdev, host_qp, fw_qp->qpn, true);
+ if (err)
+ goto err_activate;
+
+ err = mlx5vf_activate_qp(mdev, fw_qp, host_qp->qpn, false);
+ if (err)
+ goto err_activate;
+
+ tracker->host_qp = host_qp;
+ tracker->fw_qp = fw_qp;
+ err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
+ if (err)
+ goto err_activate;
+
+ MLX5_NB_INIT(&tracker->nb, mlx5vf_event_notifier, NOTIFY_ANY);
+ mlx5_eq_notifier_register(mdev, &tracker->nb);
+ *page_size = host_qp->tracked_page_size;
+ mvdev->log_active = true;
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+
+err_activate:
+ mlx5vf_destroy_qp(mdev, fw_qp);
+err_recv_resources:
+ mlx5vf_free_qp_recv_resources(mdev, host_qp);
+err_host_qp:
+ mlx5vf_destroy_qp(mdev, host_qp);
+err_cq:
+ mlx5vf_destroy_cq(mdev, &tracker->cq);
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, tracker->pdn);
+err_uar:
+ mlx5_put_uars_page(mdev, tracker->uar);
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return err;
+}
+
+static void
+set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp,
+ struct iova_bitmap *dirty)
+{
+ u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry);
+ u32 nent = size / entry_size;
+ struct page *page;
+ u64 addr;
+ u64 *buf;
+ int i;
+
+ if (WARN_ON(index >= qp->recv_buf.npages ||
+ (nent > qp->max_msg_size / entry_size)))
+ return;
+
+ page = qp->recv_buf.page_list[index];
+ buf = kmap_local_page(page);
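+ /* Each report entry holds one dirty IOVA split into low/high words */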
+ for (i = 0; i < nent; i++) {
+ addr = MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_low);
+ addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_high) << 32;
+ iova_bitmap_set(dirty, addr, qp->tracked_page_size);
+ }
+ kunmap_local(buf);
+}
+
+static void
+mlx5vf_rq_cqe(struct mlx5_vhca_qp *qp, struct mlx5_cqe64 *cqe,
+ struct iova_bitmap *dirty, int *tracker_status)
+{
+ u32 size;
+ int ix;
+
+ qp->rq.cc++;
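+ /* The tracker state is reported in the top nibble of the immediate */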
+ *tracker_status = be32_to_cpu(cqe->immediate) >> 28;
+ size = be32_to_cpu(cqe->byte_cnt);
+ ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1);
+
+ /*
+ * A zero-length CQE carries no data and is only expected once
+ * reporting has completed.
+ */
+ WARN_ON(!size && *tracker_status == MLX5_PAGE_TRACK_STATE_REPORTING);
+ if (size)
+ set_report_output(size, ix, qp, dirty);
+
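+ /* Repost the consumed WQE at the same buffer slot */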
+ qp->recv_buf.next_rq_offset = ix * qp->max_msg_size;
+ mlx5vf_post_recv(qp);
+}
+
+static void *get_cqe(struct mlx5_vhca_cq *cq, int n)
+{
+ return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
+}
+
+static struct mlx5_cqe64 *get_sw_cqe(struct mlx5_vhca_cq *cq, int n)
+{
+ void *cqe = get_cqe(cq, n & (cq->ncqe - 1));
+ struct mlx5_cqe64 *cqe64;
+
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+
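+ /*
+ * Return the CQE only if its ownership bit matches the current
+ * software pass over the ring.
+ */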
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ncqe)))) {
+ return cqe64;
+ } else {
+ return NULL;
+ }
+}
+
+static int
+mlx5vf_cq_poll_one(struct mlx5_vhca_cq *cq, struct mlx5_vhca_qp *qp,
+ struct iova_bitmap *dirty, int *tracker_status)
+{
+ struct mlx5_cqe64 *cqe;
+ u8 opcode;
+
+ cqe = get_sw_cqe(cq, cq->mcq.cons_index);
+ if (!cqe)
+ return CQ_EMPTY;
+
+ ++cq->mcq.cons_index;
+ /*
+ * Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rmb();
+ opcode = get_cqe_opcode(cqe);
+ switch (opcode) {
+ case MLX5_CQE_RESP_SEND_IMM:
+ mlx5vf_rq_cqe(qp, cqe, dirty, tracker_status);
+ return CQ_OK;
+ default:
+ return CQ_POLL_ERR;
+ }
+}
+
+int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length,
+ struct iova_bitmap *dirty)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ struct mlx5_vhca_cq *cq = &tracker->cq;
+ struct mlx5_core_dev *mdev;
+ int poll_err, err;
+
+ mutex_lock(&mvdev->state_mutex);
+ if (!mvdev->log_active) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (mvdev->mdev_detach) {
+ err = -ENOTCONN;
+ goto end;
+ }
+
+ mdev = mvdev->mdev;
+ err = mlx5vf_cmd_modify_tracker(mdev, tracker->id, iova, length,
+ MLX5_PAGE_TRACK_STATE_REPORTING);
+ if (err)
+ goto end;
+
+ tracker->status = MLX5_PAGE_TRACK_STATE_REPORTING;
+ while (tracker->status == MLX5_PAGE_TRACK_STATE_REPORTING &&
+ !tracker->is_err) {
+ poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp, dirty,
+ &tracker->status);
+ if (poll_err == CQ_EMPTY) {
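+ /*
+ * Arm the CQ, then poll once more to close the race with a CQE
+ * that landed before the arm.
+ */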
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
+ cq->mcq.cons_index);
+ poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp,
+ dirty, &tracker->status);
+ if (poll_err == CQ_EMPTY) {
+ wait_for_completion(&mvdev->tracker_comp);
+ continue;
+ }
+ }
+ if (poll_err == CQ_POLL_ERR) {
+ err = -EIO;
+ goto end;
+ }
+ mlx5_cq_set_ci(&cq->mcq);
+ }
+
+ if (tracker->status == MLX5_PAGE_TRACK_STATE_ERROR)
+ tracker->is_err = true;
+
+ if (tracker->is_err)
+ err = -EIO;
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return err;
+}
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 8208f4701a90..921d5720a1e5 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -9,6 +9,8 @@
#include <linux/kernel.h>
#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
struct mlx5vf_async_data {
struct mlx5_async_work cb_work;
@@ -39,6 +41,56 @@ struct mlx5_vf_migration_file {
struct mlx5vf_async_data async_data;
};
+struct mlx5_vhca_cq_buf {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ int cqe_size;
+ int nent;
+};
+
+struct mlx5_vhca_cq {
+ struct mlx5_vhca_cq_buf buf;
+ struct mlx5_db db;
+ struct mlx5_core_cq mcq;
+ size_t ncqe;
+};
+
+struct mlx5_vhca_recv_buf {
+ u32 npages;
+ struct page **page_list;
+ dma_addr_t *dma_addrs;
+ u32 next_rq_offset;
+ u32 mkey;
+};
+
+struct mlx5_vhca_qp {
+ struct mlx5_frag_buf buf;
+ struct mlx5_db db;
+ struct mlx5_vhca_recv_buf recv_buf;
+ u32 tracked_page_size;
+ u32 max_msg_size;
+ u32 qpn;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int wqe_cnt;
+ __be32 *db;
+ struct mlx5_frag_buf_ctrl fbc;
+ } rq;
+};
+
+struct mlx5_vhca_page_tracker {
+ u32 id;
+ u32 pdn;
+ u8 is_err:1;
+ struct mlx5_uars_page *uar;
+ struct mlx5_vhca_cq cq;
+ struct mlx5_vhca_qp *host_qp;
+ struct mlx5_vhca_qp *fw_qp;
+ struct mlx5_nb nb;
+ int status;
+};
+
struct mlx5vf_pci_core_device {
struct vfio_pci_core_device core_device;
int vf_id;
@@ -46,6 +98,8 @@ struct mlx5vf_pci_core_device {
u8 migrate_cap:1;
u8 deferred_reset:1;
u8 mdev_detach:1;
+ u8 log_active:1;
+ struct completion tracker_comp;
/* protect migration state */
struct mutex state_mutex;
enum vfio_device_mig_state mig_state;
@@ -53,6 +107,7 @@ struct mlx5vf_pci_core_device {
spinlock_t reset_lock;
struct mlx5_vf_migration_file *resuming_migf;
struct mlx5_vf_migration_file *saving_migf;
+ struct mlx5_vhca_page_tracker tracker;
struct workqueue_struct *cb_wq;
struct notifier_block nb;
struct mlx5_core_dev *mdev;
@@ -63,7 +118,8 @@ int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size);
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
- const struct vfio_migration_ops *mig_ops);
+ const struct vfio_migration_ops *mig_ops,
+ const struct vfio_log_ops *log_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
@@ -73,4 +129,9 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
+int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
+int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
+int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length, struct iova_bitmap *dirty);
#endif /* MLX5_VFIO_CMD_H */
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index a9b63d15c5d3..fd6ccb8454a2 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -579,8 +579,41 @@ static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
.migration_get_state = mlx5vf_pci_get_device_state,
};
+static const struct vfio_log_ops mlx5vf_pci_log_ops = {
+ .log_start = mlx5vf_start_page_tracker,
+ .log_stop = mlx5vf_stop_page_tracker,
+ .log_read_and_clear = mlx5vf_tracker_read_and_clear,
+};
+
+static int mlx5vf_pci_init_dev(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
+ struct mlx5vf_pci_core_device, core_device.vdev);
+ int ret;
+
+ ret = vfio_pci_core_init_dev(core_vdev);
+ if (ret)
+ return ret;
+
+ mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops,
+ &mlx5vf_pci_log_ops);
+
+ return 0;
+}
+
+static void mlx5vf_pci_release_dev(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
+ struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mlx5vf_cmd_remove_migratable(mvdev);
+ vfio_pci_core_release_dev(core_vdev);
+}
+
static const struct vfio_device_ops mlx5vf_pci_ops = {
.name = "mlx5-vfio-pci",
+ .init = mlx5vf_pci_init_dev,
+ .release = mlx5vf_pci_release_dev,
.open_device = mlx5vf_pci_open_device,
.close_device = mlx5vf_pci_close_device,
.ioctl = vfio_pci_core_ioctl,
@@ -598,21 +631,19 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
struct mlx5vf_pci_core_device *mvdev;
int ret;
- mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
- if (!mvdev)
- return -ENOMEM;
- vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
- mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops);
+ mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev,
+ &pdev->dev, &mlx5vf_pci_ops);
+ if (IS_ERR(mvdev))
+ return PTR_ERR(mvdev);
+
dev_set_drvdata(&pdev->dev, &mvdev->core_device);
ret = vfio_pci_core_register_device(&mvdev->core_device);
if (ret)
- goto out_free;
+ goto out_put_vdev;
return 0;
-out_free:
- mlx5vf_cmd_remove_migratable(mvdev);
- vfio_pci_core_uninit_device(&mvdev->core_device);
- kfree(mvdev);
+out_put_vdev:
+ vfio_put_device(&mvdev->core_device.vdev);
return ret;
}
@@ -621,9 +652,7 @@ static void mlx5vf_pci_remove(struct pci_dev *pdev)
struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
vfio_pci_core_unregister_device(&mvdev->core_device);
- mlx5vf_cmd_remove_migratable(mvdev);
- vfio_pci_core_uninit_device(&mvdev->core_device);
- kfree(mvdev);
+ vfio_put_device(&mvdev->core_device.vdev);
}
static const struct pci_device_id mlx5vf_pci_table[] = {
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 4d1a97415a27..1d4919edfbde 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
@@ -127,6 +127,8 @@ static int vfio_pci_open_device(struct vfio_device *core_vdev)
static const struct vfio_device_ops vfio_pci_ops = {
.name = "vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
.open_device = vfio_pci_open_device,
.close_device = vfio_pci_core_close_device,
.ioctl = vfio_pci_core_ioctl,
@@ -146,20 +148,19 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (vfio_pci_is_denylisted(pdev))
return -EINVAL;
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
- vfio_pci_core_init_device(vdev, pdev, &vfio_pci_ops);
+ vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev,
+ &vfio_pci_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
dev_set_drvdata(&pdev->dev, vdev);
ret = vfio_pci_core_register_device(vdev);
if (ret)
- goto out_free;
+ goto out_put_vdev;
return 0;
-out_free:
- vfio_pci_core_uninit_device(vdev);
- kfree(vdev);
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
return ret;
}
@@ -168,8 +169,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
vfio_pci_core_unregister_device(vdev);
- vfio_pci_core_uninit_device(vdev);
- kfree(vdev);
+ vfio_put_device(&vdev->vdev);
}
static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 442d3ba4122b..4a350421c5f6 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -26,7 +26,7 @@
#include <linux/vfio.h>
#include <linux/slab.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC 0
@@ -1166,7 +1166,7 @@ static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
flags = le16_to_cpu(*pflags);
/* MSI is enabled via ioctl */
- if (!is_msi(vdev))
+ if (vdev->irq_type != VFIO_PCI_MSI_IRQ_INDEX)
flags &= ~PCI_MSI_FLAGS_ENABLE;
/* Check queue size */
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index c8d3b0450fb3..badc9d828cac 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -28,7 +28,7 @@
#include <linux/nospec.h>
#include <linux/sched/mm.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "core driver for VFIO based PCI devices"
@@ -41,6 +41,23 @@ static bool disable_idle_d3;
static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
static LIST_HEAD(vfio_pci_sriov_pfs);
+struct vfio_pci_dummy_resource {
+ struct resource resource;
+ int index;
+ struct list_head res_next;
+};
+
+struct vfio_pci_vf_token {
+ struct mutex lock;
+ uuid_t uuid;
+ int users;
+};
+
+struct vfio_pci_mmap_vma {
+ struct vm_area_struct *vma;
+ struct list_head vma_next;
+};
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -260,16 +277,189 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
return ret;
}
+static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev,
+ struct eventfd_ctx *efdctx)
+{
+ /*
+ * The vdev power related flags are protected with 'memory_lock'
+ * semaphore.
+ */
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ if (vdev->pm_runtime_engaged) {
+ up_write(&vdev->memory_lock);
+ return -EINVAL;
+ }
+
+ vdev->pm_runtime_engaged = true;
+ vdev->pm_wake_eventfd_ctx = efdctx;
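+ /*
+ * Drop the runtime PM usage count; the matching get happens in
+ * __vfio_pci_runtime_pm_exit().
+ */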
+ pm_runtime_put_noidle(&vdev->pdev->dev);
+ up_write(&vdev->memory_lock);
+
+ return 0;
+}
+
+static int vfio_pci_core_pm_entry(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ /*
+ * Inside vfio_pci_runtime_pm_entry(), only the runtime PM usage count
+ * will be decremented. The pm_runtime_put() will be invoked again
+ * while returning from the ioctl and then the device can go into
+ * runtime suspended state.
+ */
+ return vfio_pci_runtime_pm_entry(vdev, NULL);
+}
+
+static int vfio_pci_core_pm_entry_with_wakeup(
+ struct vfio_device *device, u32 flags,
+ struct vfio_device_low_power_entry_with_wakeup __user *arg,
+ size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ struct vfio_device_low_power_entry_with_wakeup entry;
+ struct eventfd_ctx *efdctx;
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
+ sizeof(entry));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&entry, arg, sizeof(entry)))
+ return -EFAULT;
+
+ if (entry.wakeup_eventfd < 0)
+ return -EINVAL;
+
+ efdctx = eventfd_ctx_fdget(entry.wakeup_eventfd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ ret = vfio_pci_runtime_pm_entry(vdev, efdctx);
+ if (ret)
+ eventfd_ctx_put(efdctx);
+
+ return ret;
+}
+
+static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
+{
+ if (vdev->pm_runtime_engaged) {
+ vdev->pm_runtime_engaged = false;
+ pm_runtime_get_noresume(&vdev->pdev->dev);
+
+ if (vdev->pm_wake_eventfd_ctx) {
+ eventfd_ctx_put(vdev->pm_wake_eventfd_ctx);
+ vdev->pm_wake_eventfd_ctx = NULL;
+ }
+ }
+}
+
+static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
+{
+ /*
+ * The vdev power related flags are protected with 'memory_lock'
+ * semaphore.
+ */
+ down_write(&vdev->memory_lock);
+ __vfio_pci_runtime_pm_exit(vdev);
+ up_write(&vdev->memory_lock);
+}
+
+static int vfio_pci_core_pm_exit(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ /*
+ * The device is always in the active state here due to pm wrappers
+ * around ioctls. If the device had entered a low power state and
+ * pm_wake_eventfd_ctx is valid, vfio_pci_core_runtime_resume() has
+ * already signaled the eventfd and exited low power mode itself.
+ * pm_runtime_engaged protects the redundant call here.
+ */
+ vfio_pci_runtime_pm_exit(vdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int vfio_pci_core_runtime_suspend(struct device *dev)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
+
+ down_write(&vdev->memory_lock);
+ /*
+ * The user can move the device into D3hot state before invoking
+ * power management IOCTL. Move the device into D0 state here and then
+ * the pci-driver core runtime PM suspend function will move the device
+ * into the low power state. Also, for the devices which have
+ * NoSoftRst-, it will help in restoring the original state
+ * (saved locally in 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ up_write(&vdev->memory_lock);
+
+ /*
+ * If INTx is enabled, then mask INTx before going into the runtime
+ * suspended state and unmask the same in the runtime resume.
+ * If INTx has already been masked by the user, then
+ * vfio_pci_intx_mask() will return false and in that case, INTx
+ * should not be unmasked in the runtime resume.
+ */
+ vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
+ vfio_pci_intx_mask(vdev));
+
+ return 0;
+}
+
+static int vfio_pci_core_runtime_resume(struct device *dev)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
+
+ /*
+ * A resume while pm_wake_eventfd_ctx is set signals the eventfd
+ * and exits low power mode.
+ */
+ down_write(&vdev->memory_lock);
+ if (vdev->pm_wake_eventfd_ctx) {
+ eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
+ __vfio_pci_runtime_pm_exit(vdev);
+ }
+ up_write(&vdev->memory_lock);
+
+ if (vdev->pm_intx_masked)
+ vfio_pci_intx_unmask(vdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
/*
- * The dev_pm_ops needs to be provided to make pci-driver runtime PM working,
- * so use structure without any callbacks.
- *
* The pci-driver core runtime PM routines always save the device state
* before going into suspended state. If the device is going into low power
 * state with only runtime PM ops, then no explicit handling is needed
* for the devices which have NoSoftRst-.
*/
-static const struct dev_pm_ops vfio_pci_core_pm_ops = { };
+static const struct dev_pm_ops vfio_pci_core_pm_ops = {
+ SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend,
+ vfio_pci_core_runtime_resume,
+ NULL)
+};
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
{
@@ -371,6 +561,18 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
/*
* This function can be invoked while the power state is non-D0.
+ * This non-D0 power state can be with or without runtime PM.
+ * vfio_pci_runtime_pm_exit() will internally increment the usage
+ * count corresponding to pm_runtime_put() called during low power
+ * feature entry and then pm_runtime_resume() will wake up the device,
+ * if the device has already gone into the suspended state. Otherwise,
+ * the vfio_pci_set_power_state() will change the device power state
+ * to D0.
+ */
+ vfio_pci_runtime_pm_exit(vdev);
+ pm_runtime_resume(&pdev->dev);
+
+ /*
* This function calls __pci_reset_function_locked() which internally
* can use pci_pm_reset() for the function reset. pci_pm_reset() will
* fail if the power state is non-D0. Also, for the devices which
@@ -645,10 +847,10 @@ static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
return vfio_info_add_capability(caps, &header, sizeof(header));
}
-int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
- unsigned int type, unsigned int subtype,
- const struct vfio_pci_regops *ops,
- size_t size, u32 flags, void *data)
+int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data)
{
struct vfio_pci_region *region;
@@ -670,508 +872,532 @@ int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
return 0;
}
-EXPORT_SYMBOL_GPL(vfio_pci_register_dev_region);
+EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region);
-long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
- unsigned long arg)
+static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
+ struct vfio_device_info __user *arg)
{
- struct vfio_pci_core_device *vdev =
- container_of(core_vdev, struct vfio_pci_core_device, vdev);
- unsigned long minsz;
-
- if (cmd == VFIO_DEVICE_GET_INFO) {
- struct vfio_device_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned long capsz;
- int ret;
-
- minsz = offsetofend(struct vfio_device_info, num_irqs);
+ unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
+ struct vfio_device_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ unsigned long capsz;
+ int ret;
- /* For backward compatibility, cannot require this */
- capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+ /* For backward compatibility, cannot require this */
+ capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
- if (info.argsz < minsz)
- return -EINVAL;
+ if (info.argsz < minsz)
+ return -EINVAL;
- if (info.argsz >= capsz) {
- minsz = capsz;
- info.cap_offset = 0;
- }
+ if (info.argsz >= capsz) {
+ minsz = capsz;
+ info.cap_offset = 0;
+ }
- info.flags = VFIO_DEVICE_FLAGS_PCI;
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
- if (vdev->reset_works)
- info.flags |= VFIO_DEVICE_FLAGS_RESET;
+ if (vdev->reset_works)
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
- info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
- info.num_irqs = VFIO_PCI_NUM_IRQS;
+ info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
- ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
- if (ret && ret != -ENODEV) {
- pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
- return ret;
- }
+ ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
+ if (ret && ret != -ENODEV) {
+ pci_warn(vdev->pdev,
+ "Failed to setup zPCI info capabilities\n");
+ return ret;
+ }
- if (caps.size) {
- info.flags |= VFIO_DEVICE_FLAGS_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
+ if (caps.size) {
+ info.flags |= VFIO_DEVICE_FLAGS_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user(arg + 1, caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
}
-
- kfree(caps.buf);
+ info.cap_offset = sizeof(*arg);
}
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
+ kfree(caps.buf);
+ }
- } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
- struct pci_dev *pdev = vdev->pdev;
- struct vfio_region_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
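
A hedged userspace sketch of the argsz negotiation this handler implements, assuming an open VFIO device fd: if the capability chain does not fit, the kernel bumps info.argsz and the caller retries with a larger buffer.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static struct vfio_device_info *query_device_info(int device_fd)
{
	struct vfio_device_info info = { .argsz = sizeof(info) };
	struct vfio_device_info *full;

	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info))
		return NULL;

	/* Second pass: caps (if any) are appended after the struct and
	 * located via the returned cap_offset. */
	full = calloc(1, info.argsz);
	if (!full)
		return NULL;
	full->argsz = info.argsz;
	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, full)) {
		free(full);
		return NULL;
	}
	return full;
}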
- minsz = offsetofend(struct vfio_region_info, offset);
+static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
+ struct vfio_region_info __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_region_info, offset);
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
- if (info.argsz < minsz)
- return -EINVAL;
+ if (info.argsz < minsz)
+ return -EINVAL;
- switch (info.index) {
- case VFIO_PCI_CONFIG_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = pdev->cfg_size;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pdev->cfg_size;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ info.flags = 0;
break;
- case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- info.flags = 0;
- break;
- }
+ }
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- if (vdev->bar_mmap_supported[info.index]) {
- info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
- if (info.index == vdev->msix_bar) {
- ret = msix_mmappable_cap(vdev, &caps);
- if (ret)
- return ret;
- }
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ if (vdev->bar_mmap_supported[info.index]) {
+ info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+ if (info.index == vdev->msix_bar) {
+ ret = msix_mmappable_cap(vdev, &caps);
+ if (ret)
+ return ret;
}
+ }
- break;
- case VFIO_PCI_ROM_REGION_INDEX:
- {
- void __iomem *io;
- size_t size;
- u16 cmd;
-
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.flags = 0;
+ break;
+ case VFIO_PCI_ROM_REGION_INDEX: {
+ void __iomem *io;
+ size_t size;
+ u16 cmd;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = 0;
+
+ /* Report the BAR size, not the ROM size */
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ /* Shadow ROMs appear as PCI option ROMs */
+ if (pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW)
+ info.size = 0x20000;
+ else
+ break;
+ }
- /* Report the BAR size, not the ROM size */
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- /* Shadow ROMs appear as PCI option ROMs */
- if (pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW)
- info.size = 0x20000;
- else
- break;
- }
+ /*
+ * Is it really there? Enable memory decode for implicit access
+ * in pci_map_rom().
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
+ info.size = 0;
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
- /*
- * Is it really there? Enable memory decode for
- * implicit access in pci_map_rom().
- */
- cmd = vfio_pci_memory_lock_and_enable(vdev);
- io = pci_map_rom(pdev, &size);
- if (io) {
- info.flags = VFIO_REGION_INFO_FLAG_READ;
- pci_unmap_rom(pdev, io);
- } else {
- info.size = 0;
- }
- vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ break;
+ }
+ case VFIO_PCI_VGA_REGION_INDEX:
+ if (!vdev->has_vga)
+ return -EINVAL;
- break;
- }
- case VFIO_PCI_VGA_REGION_INDEX:
- if (!vdev->has_vga)
- return -EINVAL;
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0xc0000;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0xc0000;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ default: {
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1
+ };
- break;
- default:
- {
- struct vfio_region_info_cap_type cap_type = {
- .header.id = VFIO_REGION_INFO_CAP_TYPE,
- .header.version = 1 };
+ if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+ info.index = array_index_nospec(
+ info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);
- if (info.index >=
- VFIO_PCI_NUM_REGIONS + vdev->num_regions)
- return -EINVAL;
- info.index = array_index_nospec(info.index,
- VFIO_PCI_NUM_REGIONS +
- vdev->num_regions);
+ i = info.index - VFIO_PCI_NUM_REGIONS;
- i = info.index - VFIO_PCI_NUM_REGIONS;
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vdev->region[i].size;
- info.flags = vdev->region[i].flags;
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
- cap_type.type = vdev->region[i].type;
- cap_type.subtype = vdev->region[i].subtype;
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
- ret = vfio_info_add_capability(&caps, &cap_type.header,
- sizeof(cap_type));
+ if (vdev->region[i].ops->add_capability) {
+ ret = vdev->region[i].ops->add_capability(
+ vdev, &vdev->region[i], &caps);
if (ret)
return ret;
-
- if (vdev->region[i].ops->add_capability) {
- ret = vdev->region[i].ops->add_capability(vdev,
- &vdev->region[i], &caps);
- if (ret)
- return ret;
- }
- }
}
+ }
+ }
- if (caps.size) {
- info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- info.cap_offset = 0;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
+ if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user(arg + 1, caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
}
-
- kfree(caps.buf);
+ info.cap_offset = sizeof(*arg);
}
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
+ kfree(caps.buf);
+ }
- } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
- struct vfio_irq_info info;
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
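
The region handler follows the same two-pass pattern. A minimal sketch, assuming an open device fd, of querying BAR0 through it:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_bar0(int device_fd, struct vfio_region_info *info)
{
	memset(info, 0, sizeof(*info));
	info->argsz = sizeof(*info);
	info->index = VFIO_PCI_BAR0_REGION_INDEX;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
		return -1;

	/* info->offset feeds pread()/pwrite()/mmap() on the device fd;
	 * info->flags advertises READ/WRITE/MMAP. If the kernel bumped
	 * info->argsz, retry with a larger buffer to fetch the caps. */
	return 0;
}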
- minsz = offsetofend(struct vfio_irq_info, count);
+static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
+ struct vfio_irq_info __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_irq_info, count);
+ struct vfio_irq_info info;
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
- if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
- return -EINVAL;
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
- switch (info.index) {
- case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
- case VFIO_PCI_REQ_IRQ_INDEX:
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ if (pci_is_pcie(vdev->pdev))
break;
- case VFIO_PCI_ERR_IRQ_INDEX:
- if (pci_is_pcie(vdev->pdev))
- break;
- fallthrough;
- default:
- return -EINVAL;
- }
-
- info.flags = VFIO_IRQ_INFO_EVENTFD;
-
- info.count = vfio_pci_get_irq_count(vdev, info.index);
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
- if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
- info.flags |= (VFIO_IRQ_INFO_MASKABLE |
- VFIO_IRQ_INFO_AUTOMASKED);
- else
- info.flags |= VFIO_IRQ_INFO_NORESIZE;
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
+ info.count = vfio_pci_get_irq_count(vdev, info.index);
- } else if (cmd == VFIO_DEVICE_SET_IRQS) {
- struct vfio_irq_set hdr;
- u8 *data = NULL;
- int max, ret = 0;
- size_t data_size = 0;
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |=
+ (VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
- minsz = offsetofend(struct vfio_irq_set, count);
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
+static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
+ struct vfio_irq_set __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_irq_set, count);
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int max, ret = 0;
+ size_t data_size = 0;
- max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
- ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
- VFIO_PCI_NUM_IRQS, &data_size);
- if (ret)
- return ret;
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
- if (data_size) {
- data = memdup_user((void __user *)(arg + minsz),
- data_size);
- if (IS_ERR(data))
- return PTR_ERR(data);
- }
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
+ &data_size);
+ if (ret)
+ return ret;
- mutex_lock(&vdev->igate);
+ if (data_size) {
+ data = memdup_user(&arg->data, data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
- ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
- hdr.start, hdr.count, data);
+ mutex_lock(&vdev->igate);
- mutex_unlock(&vdev->igate);
- kfree(data);
+ ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
+ hdr.count, data);
- return ret;
+ mutex_unlock(&vdev->igate);
+ kfree(data);
- } else if (cmd == VFIO_DEVICE_RESET) {
- int ret;
+ return ret;
+}
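
A hedged sketch of the userspace side this handler serves: attaching an eventfd as an MSI trigger, with the fd carried in the variable-length data[] tail that memdup_user() copies above.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int set_msi_trigger(int device_fd, int event_fd)
{
	size_t sz = sizeof(struct vfio_irq_set) + sizeof(__s32);
	struct vfio_irq_set *set = calloc(1, sz);
	int ret;

	if (!set)
		return -1;

	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_MSI_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &event_fd, sizeof(__s32));

	ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
	free(set);
	return ret;
}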
- if (!vdev->reset_works)
- return -EINVAL;
+static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
+ void __user *arg)
+{
+ int ret;
- vfio_pci_zap_and_down_write_memory_lock(vdev);
+ if (!vdev->reset_works)
+ return -EINVAL;
- /*
- * This function can be invoked while the power state is non-D0.
- * If pci_try_reset_function() has been called while the power
- * state is non-D0, then pci_try_reset_function() will
- * internally set the power state to D0 without vfio driver
- * involvement. For the devices which have NoSoftRst-, the
- * reset function can cause the PCI config space reset without
- * restoring the original state (saved locally in
- * 'vdev->pm_save').
- */
- vfio_pci_set_power_state(vdev, PCI_D0);
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
- ret = pci_try_reset_function(vdev->pdev);
- up_write(&vdev->memory_lock);
+ /*
+ * This function can be invoked while the power state is non-D0. If
+ * pci_try_reset_function() has been called while the power state is
+ * non-D0, then pci_try_reset_function() will internally set the power
+ * state to D0 without vfio driver involvement. For the devices which
+ * have NoSoftRst-, the reset function can cause the PCI config space
+ * reset without restoring the original state (saved locally in
+ * 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
- return ret;
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
- } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
- struct vfio_pci_hot_reset_info hdr;
- struct vfio_pci_fill_info fill = { 0 };
- struct vfio_pci_dependent_device *devices = NULL;
- bool slot = false;
- int ret = 0;
+ return ret;
+}
- minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
+static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset_info __user *arg)
+{
+ unsigned long minsz =
+ offsetofend(struct vfio_pci_hot_reset_info, count);
+ struct vfio_pci_hot_reset_info hdr;
+ struct vfio_pci_fill_info fill = { 0 };
+ struct vfio_pci_dependent_device *devices = NULL;
+ bool slot = false;
+ int ret = 0;
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
- if (hdr.argsz < minsz)
- return -EINVAL;
+ if (hdr.argsz < minsz)
+ return -EINVAL;
- hdr.flags = 0;
+ hdr.flags = 0;
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
- /* How many devices are affected? */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_count_devs,
- &fill.max, slot);
- if (ret)
- return ret;
+ /* How many devices are affected? */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &fill.max, slot);
+ if (ret)
+ return ret;
- WARN_ON(!fill.max); /* Should always be at least one */
+ WARN_ON(!fill.max); /* Should always be at least one */
- /*
- * If there's enough space, fill it now, otherwise return
- * -ENOSPC and the number of devices affected.
- */
- if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
- ret = -ENOSPC;
- hdr.count = fill.max;
- goto reset_info_exit;
- }
+ /*
+ * If there's enough space, fill it now, otherwise return -ENOSPC and
+ * the number of devices affected.
+ */
+ if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+ ret = -ENOSPC;
+ hdr.count = fill.max;
+ goto reset_info_exit;
+ }
- devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
- if (!devices)
- return -ENOMEM;
+ devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
- fill.devices = devices;
+ fill.devices = devices;
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_fill_devs,
- &fill, slot);
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
+ &fill, slot);
- /*
- * If a device was removed between counting and filling,
- * we may come up short of fill.max. If a device was
- * added, we'll have a return of -EAGAIN above.
- */
- if (!ret)
- hdr.count = fill.cur;
+ /*
+ * If a device was removed between counting and filling, we may come up
+ * short of fill.max. If a device was added, we'll have a return of
+ * -EAGAIN above.
+ */
+ if (!ret)
+ hdr.count = fill.cur;
reset_info_exit:
- if (copy_to_user((void __user *)arg, &hdr, minsz))
+ if (copy_to_user(arg, &hdr, minsz))
+ ret = -EFAULT;
+
+ if (!ret) {
+ if (copy_to_user(&arg->devices, devices,
+ hdr.count * sizeof(*devices)))
ret = -EFAULT;
+ }
- if (!ret) {
- if (copy_to_user((void __user *)(arg + minsz), devices,
- hdr.count * sizeof(*devices)))
- ret = -EFAULT;
- }
+ kfree(devices);
+ return ret;
+}
- kfree(devices);
- return ret;
+static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
+ struct vfio_pci_hot_reset hdr;
+ int32_t *group_fds;
+ struct file **files;
+ struct vfio_pci_group_info info;
+ bool slot = false;
+ int file_idx, count = 0, ret = 0;
- } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
- struct vfio_pci_hot_reset hdr;
- int32_t *group_fds;
- struct file **files;
- struct vfio_pci_group_info info;
- bool slot = false;
- int file_idx, count = 0, ret = 0;
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
- minsz = offsetofend(struct vfio_pci_hot_reset, count);
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
- if (hdr.argsz < minsz || hdr.flags)
- return -EINVAL;
+ /*
+ * We can't let userspace give us an arbitrarily large buffer to copy,
+ * so verify how many we think there could be. Note groups can have
+ * multiple devices so one group per device is the max.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &count, slot);
+ if (ret)
+ return ret;
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
+ /* Somewhere between 1 and count is OK */
+ if (!hdr.count || hdr.count > count)
+ return -EINVAL;
- /*
- * We can't let userspace give us an arbitrarily large
- * buffer to copy, so verify how many we think there
- * could be. Note groups can have multiple devices so
- * one group per device is the max.
- */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_count_devs,
- &count, slot);
- if (ret)
- return ret;
+ group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+ files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+ if (!group_fds || !files) {
+ kfree(group_fds);
+ kfree(files);
+ return -ENOMEM;
+ }
- /* Somewhere between 1 and count is OK */
- if (!hdr.count || hdr.count > count)
- return -EINVAL;
+ if (copy_from_user(group_fds, arg->group_fds,
+ hdr.count * sizeof(*group_fds))) {
+ kfree(group_fds);
+ kfree(files);
+ return -EFAULT;
+ }
- group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
- files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
- if (!group_fds || !files) {
- kfree(group_fds);
- kfree(files);
- return -ENOMEM;
- }
+ /*
+ * For each group_fd, get the group through the vfio external user
+ * interface and store the group and iommu ID. This ensures the group
+ * is held across the reset.
+ */
+ for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+ struct file *file = fget(group_fds[file_idx]);
- if (copy_from_user(group_fds, (void __user *)(arg + minsz),
- hdr.count * sizeof(*group_fds))) {
- kfree(group_fds);
- kfree(files);
- return -EFAULT;
+ if (!file) {
+ ret = -EBADF;
+ break;
}
- /*
- * For each group_fd, get the group through the vfio external
- * user interface and store the group and iommu ID. This
- * ensures the group is held across the reset.
- */
- for (file_idx = 0; file_idx < hdr.count; file_idx++) {
- struct file *file = fget(group_fds[file_idx]);
-
- if (!file) {
- ret = -EBADF;
- break;
- }
-
- /* Ensure the FD is a vfio group FD.*/
- if (!vfio_file_iommu_group(file)) {
- fput(file);
- ret = -EINVAL;
- break;
- }
-
- files[file_idx] = file;
+ /* Ensure the FD is a vfio group FD. */
+ if (!vfio_file_is_group(file)) {
+ fput(file);
+ ret = -EINVAL;
+ break;
}
- kfree(group_fds);
+ files[file_idx] = file;
+ }
- /* release reference to groups on error */
- if (ret)
- goto hot_reset_release;
+ kfree(group_fds);
+
+ /* release reference to groups on error */
+ if (ret)
+ goto hot_reset_release;
- info.count = hdr.count;
- info.files = files;
+ info.count = hdr.count;
+ info.files = files;
- ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
+ ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
hot_reset_release:
- for (file_idx--; file_idx >= 0; file_idx--)
- fput(files[file_idx]);
+ for (file_idx--; file_idx >= 0; file_idx--)
+ fput(files[file_idx]);
- kfree(files);
- return ret;
- } else if (cmd == VFIO_DEVICE_IOEVENTFD) {
- struct vfio_device_ioeventfd ioeventfd;
- int count;
+ kfree(files);
+ return ret;
+}
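
For illustration, a hedged userspace counterpart (single-group case, fds assumed already open); the handler above holds a reference on each group file across the reset.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int hot_reset_one_group(int device_fd, int group_fd)
{
	size_t sz = sizeof(struct vfio_pci_hot_reset) + sizeof(__s32);
	struct vfio_pci_hot_reset *reset = calloc(1, sz);
	int ret;

	if (!reset)
		return -1;

	reset->argsz = sz;
	reset->count = 1;		/* one fd per affected group */
	reset->group_fds[0] = group_fd;

	ret = ioctl(device_fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
	free(reset);
	return ret;
}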
- minsz = offsetofend(struct vfio_device_ioeventfd, fd);
+static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
+ struct vfio_device_ioeventfd __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
+ struct vfio_device_ioeventfd ioeventfd;
+ int count;
- if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&ioeventfd, arg, minsz))
+ return -EFAULT;
- if (ioeventfd.argsz < minsz)
- return -EINVAL;
+ if (ioeventfd.argsz < minsz)
+ return -EINVAL;
- if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
- return -EINVAL;
+ if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
+ return -EINVAL;
- count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
+ count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
- if (hweight8(count) != 1 || ioeventfd.fd < -1)
- return -EINVAL;
+ if (hweight8(count) != 1 || ioeventfd.fd < -1)
+ return -EINVAL;
- return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
- ioeventfd.data, count, ioeventfd.fd);
+ return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count,
+ ioeventfd.fd);
+}
+
+long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ void __user *uarg = (void __user *)arg;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ return vfio_pci_ioctl_get_info(vdev, uarg);
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ return vfio_pci_ioctl_get_irq_info(vdev, uarg);
+ case VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
+ return vfio_pci_ioctl_get_pci_hot_reset_info(vdev, uarg);
+ case VFIO_DEVICE_GET_REGION_INFO:
+ return vfio_pci_ioctl_get_region_info(vdev, uarg);
+ case VFIO_DEVICE_IOEVENTFD:
+ return vfio_pci_ioctl_ioeventfd(vdev, uarg);
+ case VFIO_DEVICE_PCI_HOT_RESET:
+ return vfio_pci_ioctl_pci_hot_reset(vdev, uarg);
+ case VFIO_DEVICE_RESET:
+ return vfio_pci_ioctl_reset(vdev, uarg);
+ case VFIO_DEVICE_SET_IRQS:
+ return vfio_pci_ioctl_set_irqs(vdev, uarg);
+ default:
+ return -ENOTTY;
}
- return -ENOTTY;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
- void __user *arg, size_t argsz)
+ uuid_t __user *arg, size_t argsz)
{
struct vfio_pci_core_device *vdev =
container_of(device, struct vfio_pci_core_device, vdev);
@@ -1202,6 +1428,13 @@ int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
void __user *arg, size_t argsz)
{
switch (flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY:
+ return vfio_pci_core_pm_entry(device, flags, arg, argsz);
+ case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP:
+ return vfio_pci_core_pm_entry_with_wakeup(device, flags,
+ arg, argsz);
+ case VFIO_DEVICE_FEATURE_LOW_POWER_EXIT:
+ return vfio_pci_core_pm_exit(device, flags, arg, argsz);
case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
return vfio_pci_core_feature_token(device, flags, arg, argsz);
default:
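
A minimal hedged sketch of exercising the simplest new case, LOW_POWER_ENTRY, from userspace (device fd assumed open, no regions mapped or mmap'd):

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int enter_low_power(int device_fd)
{
	struct vfio_device_feature feature = {
		.argsz = sizeof(feature),
		.flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY,
	};

	/* Any subsequent region access (read/write/mmap fault) or a
	 * LOW_POWER_EXIT feature call brings the device back. */
	return ioctl(device_fd, VFIO_DEVICE_FEATURE, &feature);
}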
@@ -1214,31 +1447,47 @@ static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ int ret;
if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
+ ret = pm_runtime_resume_and_get(&vdev->pdev->dev);
+ if (ret) {
+ pci_info_ratelimited(vdev->pdev, "runtime resume failed %d\n",
+ ret);
+ return -EIO;
+ }
+
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
- return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+ ret = vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+ break;
case VFIO_PCI_ROM_REGION_INDEX:
if (iswrite)
- return -EINVAL;
- return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+ ret = -EINVAL;
+ else
+ ret = vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+ break;
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+ ret = vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+ break;
case VFIO_PCI_VGA_REGION_INDEX:
- return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ ret = vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ break;
+
default:
index -= VFIO_PCI_NUM_REGIONS;
- return vdev->region[index].ops->rw(vdev, buf,
+ ret = vdev->region[index].ops->rw(vdev, buf,
count, ppos, iswrite);
+ break;
}
- return -EINVAL;
+ pm_runtime_put(&vdev->pdev->dev);
+ return ret;
}
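
The pattern above generalizes; a minimal kernel-side sketch (hypothetical helper, not part of this patch) of bracketing device access with a runtime PM reference:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int demo_guarded_access(struct pci_dev *pdev)
{
	int ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;	/* device stays suspended; report failure */

	/* ... safely touch config space or BARs here ... */
	ret = 0;

	pm_runtime_put(&pdev->dev);	/* allow autosuspend again */
	return ret;
}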
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
@@ -1433,7 +1682,11 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
mutex_lock(&vdev->vma_lock);
down_read(&vdev->memory_lock);
- if (!__vfio_pci_memory_enabled(vdev)) {
+ /*
+ * The memory region cannot be accessed if the low power feature is
+ * engaged or memory access is disabled.
+ */
+ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
ret = VM_FAULT_SIGBUS;
goto up_out;
}
@@ -1825,12 +2078,12 @@ static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
VGA_RSRC_LEGACY_MEM);
}
-void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
- struct pci_dev *pdev,
- const struct vfio_device_ops *vfio_pci_ops)
+int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
{
- vfio_init_group_dev(&vdev->vdev, &pdev->dev, vfio_pci_ops);
- vdev->pdev = pdev;
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ vdev->pdev = to_pci_dev(core_vdev->dev);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
@@ -1841,19 +2094,24 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
INIT_LIST_HEAD(&vdev->vma_list);
INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
+EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev);
-void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev)
+void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
mutex_destroy(&vdev->igate);
mutex_destroy(&vdev->ioeventfds_lock);
mutex_destroy(&vdev->vma_lock);
- vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev->region);
kfree(vdev->pm_save);
+ vfio_free_device(core_vdev);
}
-EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);
+EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev);
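
With init/release now vfio_device_ops callbacks, a variant driver plausibly wires the renamed helpers like this (a sketch only; the "demo" name is hypothetical and the remaining handlers are the driver's own):

static const struct vfio_device_ops demo_pci_ops = {
	.name		= "demo-vfio-pci",
	.init		= vfio_pci_core_init_dev,
	.release	= vfio_pci_core_release_dev,
	.ioctl		= vfio_pci_core_ioctl,
	.read		= vfio_pci_core_read,
	/* .open_device/.close_device/.write/.mmap as in vfio-pci */
};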
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{
@@ -1875,6 +2133,11 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
return -EINVAL;
}
+ if (vdev->vdev.log_ops && !(vdev->vdev.log_ops->log_start &&
+ vdev->vdev.log_ops->log_stop &&
+ vdev->vdev.log_ops->log_read_and_clear))
+ return -EINVAL;
+
/*
* Prevent binding to PFs with VFs enabled, the VFs might be in use
* by the host or other users. We cannot capture the VFs if they
@@ -2148,6 +2411,15 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
goto err_unlock;
}
+ /*
+ * Some of the devices in the dev_set can be in the runtime suspended
+ * state. Increment the usage count for all the devices in the dev_set
+ * before the reset and decrement it again after the reset.
+ */
+ ret = vfio_pci_dev_set_pm_runtime_get(dev_set);
+ if (ret)
+ goto err_unlock;
+
list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
/*
* Test whether all the affected devices are contained by the
@@ -2203,6 +2475,9 @@ err_undo:
else
mutex_unlock(&cur->vma_lock);
}
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+ pm_runtime_put(&cur->pdev->dev);
err_unlock:
mutex_unlock(&dev_set->lock);
return ret;
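
vfio_pci_dev_set_pm_runtime_get() itself is outside this hunk; a hedged sketch of what such a helper plausibly does, resuming every device in the set and unwinding on failure:

#include <linux/pm_runtime.h>
#include <linux/vfio_pci_core.h>

static int demo_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	int ret;

	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		ret = pm_runtime_resume_and_get(&cur->pdev->dev);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Drop the references taken so far, in reverse order. */
	list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
					     vdev.dev_set_list)
		pm_runtime_put(&cur->pdev->dev);
	return ret;
}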
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 352c725ccf18..5e6ca5926954 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -15,7 +15,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define OPREGION_SIZE (8 * 1024)
@@ -257,7 +257,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
}
}
- ret = vfio_pci_register_dev_region(vdev,
+ ret = vfio_pci_core_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
@@ -402,7 +402,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
return -EINVAL;
}
- ret = vfio_pci_register_dev_region(vdev,
+ ret = vfio_pci_core_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
@@ -422,7 +422,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
return -EINVAL;
}
- ret = vfio_pci_register_dev_region(vdev,
+ ret = vfio_pci_core_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 6069a11fb51a..40c3d7cf163f 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -20,7 +20,33 @@
#include <linux/wait.h>
#include <linux/slab.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
+
+struct vfio_pci_irq_ctx {
+ struct eventfd_ctx *trigger;
+ struct virqfd *unmask;
+ struct virqfd *mask;
+ char *name;
+ bool masked;
+ struct irq_bypass_producer producer;
+};
+
+static bool irq_is(struct vfio_pci_core_device *vdev, int type)
+{
+ return vdev->irq_type == type;
+}
+
+static bool is_intx(struct vfio_pci_core_device *vdev)
+{
+ return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
+}
+
+static bool is_irq_none(struct vfio_pci_core_device *vdev)
+{
+ return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
+ vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
+ vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
+}
/*
* INTx
@@ -33,10 +59,12 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
eventfd_signal(vdev->ctx[0].trigger, 1);
}
-void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
+ bool masked_changed = false;
spin_lock_irqsave(&vdev->irqlock, flags);
@@ -60,9 +88,11 @@ void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
disable_irq_nosync(pdev->irq);
vdev->ctx[0].masked = true;
+ masked_changed = true;
}
spin_unlock_irqrestore(&vdev->irqlock, flags);
+ return masked_changed;
}
/*
diff --git a/drivers/vfio/pci/vfio_pci_priv.h b/drivers/vfio/pci/vfio_pci_priv.h
new file mode 100644
index 000000000000..5e4fa69aee16
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_priv.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef VFIO_PCI_PRIV_H
+#define VFIO_PCI_PRIV_H
+
+#include <linux/vfio_pci_core.h>
+
+/* Special capability IDs predefined access */
+#define PCI_CAP_ID_INVALID 0xFF /* default raw access */
+#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */
+
+/* Cap maximum number of ioeventfds per device (arbitrary) */
+#define VFIO_PCI_IOEVENTFD_MAX 1000
+
+struct vfio_pci_ioeventfd {
+ struct list_head next;
+ struct vfio_pci_core_device *vdev;
+ struct virqfd *virqfd;
+ void __iomem *addr;
+ uint64_t data;
+ loff_t pos;
+ int bar;
+ int count;
+ bool test_mem;
+};
+
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);
+
+int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
+ unsigned index, unsigned start, unsigned count,
+ void *data);
+
+ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+#ifdef CONFIG_VFIO_PCI_VGA
+ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+#else
+static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite)
+{
+ return -EINVAL;
+}
+#endif
+
+int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ uint64_t data, int count, int fd);
+
+int vfio_pci_init_perm_bits(void);
+void vfio_pci_uninit_perm_bits(void);
+
+int vfio_config_init(struct vfio_pci_core_device *vdev);
+void vfio_config_free(struct vfio_pci_core_device *vdev);
+
+int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
+ pci_power_t state);
+
+bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev);
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
+ u16 cmd);
+
+#ifdef CONFIG_VFIO_PCI_IGD
+int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
+#else
+static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
+int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps);
+int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev);
+void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev);
+#else
+static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ return -ENODEV;
+}
+
+static inline int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
+{
+ return 0;
+}
+
+static inline void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
+{}
+#endif
+
+static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
+{
+ return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+}
+
+#endif
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 82ac1569deb0..e352a033b4ae 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,7 +17,7 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#ifdef __LITTLE_ENDIAN
#define vfio_ioread64 ioread64
@@ -412,8 +412,8 @@ static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
}
-long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
- uint64_t data, int count, int fd)
+int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ uint64_t data, int count, int fd)
{
struct pci_dev *pdev = vdev->pdev;
loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index 0cbdcd14f1c8..0990fdb146b7 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -15,7 +15,7 @@
#include <asm/pci_clp.h>
#include <asm/pci_io.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
/*
* Add the Base PCI Function information to the device info region.
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 1aaa4f721bd2..eaea63e5294c 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vfio.h>
+#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
#include "vfio_platform_private.h"
@@ -40,20 +41,16 @@ static int get_amba_irq(struct vfio_platform_device *vdev, int i)
return ret ? ret : -ENXIO;
}
-static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
+static int vfio_amba_init_dev(struct vfio_device *core_vdev)
{
- struct vfio_platform_device *vdev;
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+ struct amba_device *adev = to_amba_device(core_vdev->dev);
int ret;
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
-
vdev->name = kasprintf(GFP_KERNEL, "vfio-amba-%08x", adev->periphid);
- if (!vdev->name) {
- kfree(vdev);
+ if (!vdev->name)
return -ENOMEM;
- }
vdev->opaque = (void *) adev;
vdev->flags = VFIO_DEVICE_FLAGS_AMBA;
@@ -61,26 +58,67 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
vdev->get_irq = get_amba_irq;
vdev->reset_required = false;
- ret = vfio_platform_probe_common(vdev, &adev->dev);
- if (ret) {
+ ret = vfio_platform_init_common(vdev);
+ if (ret)
kfree(vdev->name);
- kfree(vdev);
- return ret;
- }
+ return ret;
+}
+
+static const struct vfio_device_ops vfio_amba_ops;
+static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct vfio_platform_device *vdev;
+ int ret;
+
+ vdev = vfio_alloc_device(vfio_platform_device, vdev, &adev->dev,
+ &vfio_amba_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+ ret = vfio_register_group_dev(&vdev->vdev);
+ if (ret)
+ goto out_put_vdev;
+
+ pm_runtime_enable(&adev->dev);
dev_set_drvdata(&adev->dev, vdev);
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
+ return ret;
+}
+
+static void vfio_amba_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+
+ vfio_platform_release_common(vdev);
+ kfree(vdev->name);
+ vfio_free_device(core_vdev);
}
static void vfio_amba_remove(struct amba_device *adev)
{
struct vfio_platform_device *vdev = dev_get_drvdata(&adev->dev);
- vfio_platform_remove_common(vdev);
- kfree(vdev->name);
- kfree(vdev);
+ vfio_unregister_group_dev(&vdev->vdev);
+ pm_runtime_disable(vdev->device);
+ vfio_put_device(&vdev->vdev);
}
+static const struct vfio_device_ops vfio_amba_ops = {
+ .name = "vfio-amba",
+ .init = vfio_amba_init_dev,
+ .release = vfio_amba_release_dev,
+ .open_device = vfio_platform_open_device,
+ .close_device = vfio_platform_close_device,
+ .ioctl = vfio_platform_ioctl,
+ .read = vfio_platform_read,
+ .write = vfio_platform_write,
+ .mmap = vfio_platform_mmap,
+};
+
static const struct amba_id pl330_ids[] = {
{ 0, 0 },
};
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index 04f40c5acfd6..82cedcebfd90 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vfio.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include "vfio_platform_private.h"
@@ -36,14 +37,11 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i)
return platform_get_irq_optional(pdev, i);
}
-static int vfio_platform_probe(struct platform_device *pdev)
+static int vfio_platform_init_dev(struct vfio_device *core_vdev)
{
- struct vfio_platform_device *vdev;
- int ret;
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+ struct platform_device *pdev = to_platform_device(core_vdev->dev);
vdev->opaque = (void *) pdev;
vdev->name = pdev->name;
@@ -52,24 +50,64 @@ static int vfio_platform_probe(struct platform_device *pdev)
vdev->get_irq = get_platform_irq;
vdev->reset_required = reset_required;
- ret = vfio_platform_probe_common(vdev, &pdev->dev);
- if (ret) {
- kfree(vdev);
- return ret;
- }
+ return vfio_platform_init_common(vdev);
+}
+
+static const struct vfio_device_ops vfio_platform_ops;
+static int vfio_platform_probe(struct platform_device *pdev)
+{
+ struct vfio_platform_device *vdev;
+ int ret;
+
+ vdev = vfio_alloc_device(vfio_platform_device, vdev, &pdev->dev,
+ &vfio_platform_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ ret = vfio_register_group_dev(&vdev->vdev);
+ if (ret)
+ goto out_put_vdev;
+
+ pm_runtime_enable(&pdev->dev);
dev_set_drvdata(&pdev->dev, vdev);
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
+ return ret;
+}
+
+static void vfio_platform_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+
+ vfio_platform_release_common(vdev);
+ vfio_free_device(core_vdev);
}
static int vfio_platform_remove(struct platform_device *pdev)
{
struct vfio_platform_device *vdev = dev_get_drvdata(&pdev->dev);
- vfio_platform_remove_common(vdev);
- kfree(vdev);
+ vfio_unregister_group_dev(&vdev->vdev);
+ pm_runtime_disable(vdev->device);
+ vfio_put_device(&vdev->vdev);
return 0;
}
+static const struct vfio_device_ops vfio_platform_ops = {
+ .name = "vfio-platform",
+ .init = vfio_platform_init_dev,
+ .release = vfio_platform_release_dev,
+ .open_device = vfio_platform_open_device,
+ .close_device = vfio_platform_close_device,
+ .ioctl = vfio_platform_ioctl,
+ .read = vfio_platform_read,
+ .write = vfio_platform_write,
+ .mmap = vfio_platform_mmap,
+};
+
static struct platform_driver vfio_platform_driver = {
.probe = vfio_platform_probe,
.remove = vfio_platform_remove,
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 256f55b84e70..55dc4f43c31e 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -218,7 +218,7 @@ static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
return -EINVAL;
}
-static void vfio_platform_close_device(struct vfio_device *core_vdev)
+void vfio_platform_close_device(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -236,8 +236,9 @@ static void vfio_platform_close_device(struct vfio_device *core_vdev)
vfio_platform_regions_cleanup(vdev);
vfio_platform_irq_cleanup(vdev);
}
+EXPORT_SYMBOL_GPL(vfio_platform_close_device);
-static int vfio_platform_open_device(struct vfio_device *core_vdev)
+int vfio_platform_open_device(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -273,9 +274,10 @@ err_irq:
vfio_platform_regions_cleanup(vdev);
return ret;
}
+EXPORT_SYMBOL_GPL(vfio_platform_open_device);
-static long vfio_platform_ioctl(struct vfio_device *core_vdev,
- unsigned int cmd, unsigned long arg)
+long vfio_platform_ioctl(struct vfio_device *core_vdev,
+ unsigned int cmd, unsigned long arg)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -382,6 +384,7 @@ static long vfio_platform_ioctl(struct vfio_device *core_vdev,
return -ENOTTY;
}
+EXPORT_SYMBOL_GPL(vfio_platform_ioctl);
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
char __user *buf, size_t count,
@@ -438,8 +441,8 @@ err:
return -EFAULT;
}
-static ssize_t vfio_platform_read(struct vfio_device *core_vdev,
- char __user *buf, size_t count, loff_t *ppos)
+ssize_t vfio_platform_read(struct vfio_device *core_vdev,
+ char __user *buf, size_t count, loff_t *ppos)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -460,6 +463,7 @@ static ssize_t vfio_platform_read(struct vfio_device *core_vdev,
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(vfio_platform_read);
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
const char __user *buf, size_t count,
@@ -515,8 +519,8 @@ err:
return -EFAULT;
}
-static ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf,
- size_t count, loff_t *ppos)
+ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -537,6 +541,7 @@ static ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __u
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(vfio_platform_write);
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
struct vm_area_struct *vma)
@@ -558,7 +563,7 @@ static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
req_len, vma->vm_page_prot);
}
-static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
+int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -598,16 +603,7 @@ static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_stru
return -EINVAL;
}
-
-static const struct vfio_device_ops vfio_platform_ops = {
- .name = "vfio-platform",
- .open_device = vfio_platform_open_device,
- .close_device = vfio_platform_close_device,
- .ioctl = vfio_platform_ioctl,
- .read = vfio_platform_read,
- .write = vfio_platform_write,
- .mmap = vfio_platform_mmap,
-};
+EXPORT_SYMBOL_GPL(vfio_platform_mmap);
static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
struct device *dev)
@@ -639,55 +635,34 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
* If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
* valid checks. We cannot claim that this system is DT.
*/
-int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev)
+int vfio_platform_init_common(struct vfio_platform_device *vdev)
{
int ret;
-
- vfio_init_group_dev(&vdev->vdev, dev, &vfio_platform_ops);
+ struct device *dev = vdev->vdev.dev;
ret = vfio_platform_acpi_probe(vdev, dev);
if (ret)
ret = vfio_platform_of_probe(vdev, dev);
if (ret)
- goto out_uninit;
+ return ret;
vdev->device = dev;
+ mutex_init(&vdev->igate);
ret = vfio_platform_get_reset(vdev);
- if (ret && vdev->reset_required) {
+ if (ret && vdev->reset_required)
dev_err(dev, "No reset function found for device %s\n",
vdev->name);
- goto out_uninit;
- }
-
- ret = vfio_register_group_dev(&vdev->vdev);
- if (ret)
- goto put_reset;
-
- mutex_init(&vdev->igate);
-
- pm_runtime_enable(dev);
- return 0;
-
-put_reset:
- vfio_platform_put_reset(vdev);
-out_uninit:
- vfio_uninit_group_dev(&vdev->vdev);
return ret;
}
-EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
+EXPORT_SYMBOL_GPL(vfio_platform_init_common);
-void vfio_platform_remove_common(struct vfio_platform_device *vdev)
+void vfio_platform_release_common(struct vfio_platform_device *vdev)
{
- vfio_unregister_group_dev(&vdev->vdev);
-
- pm_runtime_disable(vdev->device);
vfio_platform_put_reset(vdev);
- vfio_uninit_group_dev(&vdev->vdev);
}
-EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
+EXPORT_SYMBOL_GPL(vfio_platform_release_common);
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index 691b43f4b2b2..8d8fab516849 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -78,9 +78,21 @@ struct vfio_platform_reset_node {
vfio_platform_reset_fn_t of_reset;
};
-int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev);
-void vfio_platform_remove_common(struct vfio_platform_device *vdev);
+int vfio_platform_init_common(struct vfio_platform_device *vdev);
+void vfio_platform_release_common(struct vfio_platform_device *vdev);
+
+int vfio_platform_open_device(struct vfio_device *core_vdev);
+void vfio_platform_close_device(struct vfio_device *core_vdev);
+long vfio_platform_ioctl(struct vfio_device *core_vdev,
+ unsigned int cmd, unsigned long arg);
+ssize_t vfio_platform_read(struct vfio_device *core_vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos);
+ssize_t vfio_platform_write(struct vfio_device *core_vdev,
+ const char __user *buf,
+ size_t count, loff_t *ppos);
+int vfio_platform_mmap(struct vfio_device *core_vdev,
+ struct vm_area_struct *vma);
int vfio_platform_irq_init(struct vfio_platform_device *vdev);
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index 503bea6c843d..bcad54bbab08 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -3,6 +3,16 @@
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*/
+#ifndef __VFIO_VFIO_H__
+#define __VFIO_VFIO_H__
+
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+
+struct iommu_group;
+struct vfio_device;
+struct vfio_container;
enum vfio_group_type {
/*
@@ -28,6 +38,30 @@ enum vfio_group_type {
VFIO_NO_IOMMU,
};
+struct vfio_group {
+ struct device dev;
+ struct cdev cdev;
+ /*
+ * When drivers is non-zero a driver is attached to the struct device
+ * that provided the iommu_group and thus the iommu_group is a valid
+ * pointer. When drivers is 0 the driver is being detached. Once drivers
+ * reaches 0 the iommu_group pointer is invalid.
+ */
+ refcount_t drivers;
+ unsigned int container_users;
+ struct iommu_group *iommu_group;
+ struct vfio_container *container;
+ struct list_head device_list;
+ struct mutex device_lock;
+ struct list_head vfio_next;
+ struct list_head container_next;
+ enum vfio_group_type type;
+ struct mutex group_lock;
+ struct kvm *kvm;
+ struct file *opened_file;
+ struct blocking_notifier_head notifier;
+};
+
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
VFIO_IOMMU_CONTAINER_CLOSE = 0,
@@ -67,5 +101,33 @@ struct vfio_iommu_driver_ops {
enum vfio_iommu_notify_type event);
};
+struct vfio_iommu_driver {
+ const struct vfio_iommu_driver_ops *ops;
+ struct list_head vfio_next;
+};
+
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
+
+bool vfio_assert_device_open(struct vfio_device *device);
+
+struct vfio_container *vfio_container_from_file(struct file *filep);
+int vfio_device_assign_container(struct vfio_device *device);
+void vfio_device_unassign_container(struct vfio_device *device);
+int vfio_container_attach_group(struct vfio_container *container,
+ struct vfio_group *group);
+void vfio_group_detach_container(struct vfio_group *group);
+void vfio_device_container_register(struct vfio_device *device);
+void vfio_device_container_unregister(struct vfio_device *device);
+long vfio_container_ioctl_check_extension(struct vfio_container *container,
+ unsigned long arg);
+int __init vfio_container_init(void);
+void vfio_container_cleanup(void);
+
+#ifdef CONFIG_VFIO_NOIOMMU
+extern bool vfio_noiommu __read_mostly;
+#else
+enum { vfio_noiommu = false };
+#endif
+
+#endif
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 8706482665d1..23c24fe98c00 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -37,7 +37,6 @@
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
-#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>
#include "vfio.h"
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 7cb56c382c97..2d168793d4e1 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -32,6 +32,9 @@
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
+#include <linux/pm_runtime.h>
+#include <linux/interval_tree.h>
+#include <linux/iova_bitmap.h>
#include "vfio.h"
#define DRIVER_VERSION "0.3"
@@ -40,54 +43,14 @@
static struct vfio {
struct class *class;
- struct list_head iommu_drivers_list;
- struct mutex iommu_drivers_lock;
struct list_head group_list;
struct mutex group_lock; /* locks group_list */
struct ida group_ida;
dev_t group_devt;
+ struct class *device_class;
+ struct ida device_ida;
} vfio;
-struct vfio_iommu_driver {
- const struct vfio_iommu_driver_ops *ops;
- struct list_head vfio_next;
-};
-
-struct vfio_container {
- struct kref kref;
- struct list_head group_list;
- struct rw_semaphore group_lock;
- struct vfio_iommu_driver *iommu_driver;
- void *iommu_data;
- bool noiommu;
-};
-
-struct vfio_group {
- struct device dev;
- struct cdev cdev;
- refcount_t users;
- unsigned int container_users;
- struct iommu_group *iommu_group;
- struct vfio_container *container;
- struct list_head device_list;
- struct mutex device_lock;
- struct list_head vfio_next;
- struct list_head container_next;
- enum vfio_group_type type;
- unsigned int dev_counter;
- struct rw_semaphore group_rwsem;
- struct kvm *kvm;
- struct file *opened_file;
- struct blocking_notifier_head notifier;
-};
-
-#ifdef CONFIG_VFIO_NOIOMMU
-static bool noiommu __read_mostly;
-module_param_named(enable_unsafe_noiommu_mode,
- noiommu, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
-#endif
-
static DEFINE_XARRAY(vfio_device_set_xa);
static const struct file_operations vfio_group_fops;
@@ -162,146 +125,6 @@ static void vfio_release_device_set(struct vfio_device *device)
xa_unlock(&vfio_device_set_xa);
}
-#ifdef CONFIG_VFIO_NOIOMMU
-static void *vfio_noiommu_open(unsigned long arg)
-{
- if (arg != VFIO_NOIOMMU_IOMMU)
- return ERR_PTR(-EINVAL);
- if (!capable(CAP_SYS_RAWIO))
- return ERR_PTR(-EPERM);
-
- return NULL;
-}
-
-static void vfio_noiommu_release(void *iommu_data)
-{
-}
-
-static long vfio_noiommu_ioctl(void *iommu_data,
- unsigned int cmd, unsigned long arg)
-{
- if (cmd == VFIO_CHECK_EXTENSION)
- return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;
-
- return -ENOTTY;
-}
-
-static int vfio_noiommu_attach_group(void *iommu_data,
- struct iommu_group *iommu_group, enum vfio_group_type type)
-{
- return 0;
-}
-
-static void vfio_noiommu_detach_group(void *iommu_data,
- struct iommu_group *iommu_group)
-{
-}
-
-static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
- .name = "vfio-noiommu",
- .owner = THIS_MODULE,
- .open = vfio_noiommu_open,
- .release = vfio_noiommu_release,
- .ioctl = vfio_noiommu_ioctl,
- .attach_group = vfio_noiommu_attach_group,
- .detach_group = vfio_noiommu_detach_group,
-};
-
-/*
- * Only noiommu containers can use vfio-noiommu and noiommu containers can only
- * use vfio-noiommu.
- */
-static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
- const struct vfio_iommu_driver *driver)
-{
- return container->noiommu == (driver->ops == &vfio_noiommu_ops);
-}
-#else
-static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
- const struct vfio_iommu_driver *driver)
-{
- return true;
-}
-#endif /* CONFIG_VFIO_NOIOMMU */
-
-/*
- * IOMMU driver registration
- */
-int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
-{
- struct vfio_iommu_driver *driver, *tmp;
-
- if (WARN_ON(!ops->register_device != !ops->unregister_device))
- return -EINVAL;
-
- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
- if (!driver)
- return -ENOMEM;
-
- driver->ops = ops;
-
- mutex_lock(&vfio.iommu_drivers_lock);
-
- /* Check for duplicates */
- list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
- if (tmp->ops == ops) {
- mutex_unlock(&vfio.iommu_drivers_lock);
- kfree(driver);
- return -EINVAL;
- }
- }
-
- list_add(&driver->vfio_next, &vfio.iommu_drivers_list);
-
- mutex_unlock(&vfio.iommu_drivers_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
-
-void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
-{
- struct vfio_iommu_driver *driver;
-
- mutex_lock(&vfio.iommu_drivers_lock);
- list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
- if (driver->ops == ops) {
- list_del(&driver->vfio_next);
- mutex_unlock(&vfio.iommu_drivers_lock);
- kfree(driver);
- return;
- }
- }
- mutex_unlock(&vfio.iommu_drivers_lock);
-}
-EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
-
-static void vfio_group_get(struct vfio_group *group);
-
-/*
- * Container objects - containers are created when /dev/vfio/vfio is
- * opened, but their lifecycle extends until the last user is done, so
- * it's freed via kref. Must support container/group/device being
- * closed in any order.
- */
-static void vfio_container_get(struct vfio_container *container)
-{
- kref_get(&container->kref);
-}
-
-static void vfio_container_release(struct kref *kref)
-{
- struct vfio_container *container;
- container = container_of(kref, struct vfio_container, kref);
-
- kfree(container);
-}
-
-static void vfio_container_put(struct vfio_container *container)
-{
- kref_put(&container->kref, vfio_container_release);
-}
-
/*
* Group objects - create, release, get, put, search
*/
@@ -310,9 +133,13 @@ __vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
struct vfio_group *group;
+ /*
+ * group->iommu_group from the vfio.group_list cannot be NULL
+ * under the vfio.group_lock.
+ */
list_for_each_entry(group, &vfio.group_list, vfio_next) {
if (group->iommu_group == iommu_group) {
- vfio_group_get(group);
+ refcount_inc(&group->drivers);
return group;
}
}
@@ -335,7 +162,8 @@ static void vfio_group_release(struct device *dev)
struct vfio_group *group = container_of(dev, struct vfio_group, dev);
mutex_destroy(&group->device_lock);
- iommu_group_put(group->iommu_group);
+ mutex_destroy(&group->group_lock);
+ WARN_ON(group->iommu_group);
ida_free(&vfio.group_ida, MINOR(group->dev.devt));
kfree(group);
}
@@ -363,8 +191,8 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
cdev_init(&group->cdev, &vfio_group_fops);
group->cdev.owner = THIS_MODULE;
- refcount_set(&group->users, 1);
- init_rwsem(&group->group_rwsem);
+ refcount_set(&group->drivers, 1);
+ mutex_init(&group->group_lock);
INIT_LIST_HEAD(&group->device_list);
mutex_init(&group->device_lock);
group->iommu_group = iommu_group;
@@ -420,44 +248,64 @@ err_put:
return ret;
}
-static void vfio_group_put(struct vfio_group *group)
+static void vfio_device_remove_group(struct vfio_device *device)
{
- if (!refcount_dec_and_mutex_lock(&group->users, &vfio.group_lock))
+ struct vfio_group *group = device->group;
+ struct iommu_group *iommu_group;
+
+ if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
+ iommu_group_remove_device(device->dev);
+
+ /* Pairs with vfio_create_group() / vfio_group_get_from_iommu() */
+ if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
return;
+ list_del(&group->vfio_next);
/*
+ * We could concurrently probe another driver in the group that might
+ * race vfio_device_remove_group() with vfio_group_get_from_iommu(), so we
+ * have to ensure that the sysfs is all cleaned up under lock, otherwise
+ * cdev_device_add() will fail due to the name already existing.
+ */
+ cdev_device_del(&group->cdev, &group->dev);
+
+ mutex_lock(&group->group_lock);
+ /*
* These data structures all have paired operations that can only be
- * undone when the caller holds a live reference on the group. Since all
- * pairs must be undone these WARN_ON's indicate some caller did not
+ * undone when the caller holds a live reference on the device. Since
+ * all pairs must be undone these WARN_ON's indicate some caller did not
* properly hold the group reference.
*/
WARN_ON(!list_empty(&group->device_list));
- WARN_ON(group->container || group->container_users);
WARN_ON(group->notifier.head);
- list_del(&group->vfio_next);
- cdev_device_del(&group->cdev, &group->dev);
+ /*
+ * Revoke all users of group->iommu_group. At this point we know there
+ * are no devices active because we are unplugging the last one. Setting
+ * iommu_group to NULL blocks all new users.
+ */
+ if (group->container)
+ vfio_group_detach_container(group);
+ iommu_group = group->iommu_group;
+ group->iommu_group = NULL;
+ mutex_unlock(&group->group_lock);
mutex_unlock(&vfio.group_lock);
+ iommu_group_put(iommu_group);
put_device(&group->dev);
}
-static void vfio_group_get(struct vfio_group *group)
-{
- refcount_inc(&group->users);
-}
-
/*
* Device objects - create, release, get, put, search
*/
/* Device reference always implies a group reference */
-static void vfio_device_put(struct vfio_device *device)
+static void vfio_device_put_registration(struct vfio_device *device)
{
if (refcount_dec_and_test(&device->refcount))
complete(&device->comp);
}
-static bool vfio_device_try_get(struct vfio_device *device)
+static bool vfio_device_try_get_registration(struct vfio_device *device)
{
return refcount_inc_not_zero(&device->refcount);
}
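
The renamed registration helpers above implement the usual wait-for-last-user idiom: the unregister path drops its reference and then blocks on a completion that the final vfio_device_put_registration() fires (see the loop around device->comp further down). The bare pattern, with hypothetical struct obj names, assuming refcount_set(&o->users, 1) and init_completion(&o->gone) were done at registration time:

```c
#include <linux/refcount.h>
#include <linux/completion.h>

struct obj {
	refcount_t users;
	struct completion gone;		/* fired by the final put */
};

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->users))
		complete(&o->gone);
}

static bool obj_try_get(struct obj *o)
{
	/* fails once teardown has dropped the initial reference */
	return refcount_inc_not_zero(&o->users);
}

static void obj_unregister(struct obj *o)
{
	obj_put(o);			/* drop the registration reference */
	wait_for_completion(&o->gone);	/* sleep until all users are gone */
}
```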
@@ -469,7 +317,8 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
mutex_lock(&group->device_lock);
list_for_each_entry(device, &group->device_list, group_next) {
- if (device->dev == dev && vfio_device_try_get(device)) {
+ if (device->dev == dev &&
+ vfio_device_try_get_registration(device)) {
mutex_unlock(&group->device_lock);
return device;
}
@@ -481,20 +330,110 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
/*
* VFIO driver API
*/
-void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
- const struct vfio_device_ops *ops)
+/* Release helper called by vfio_put_device() */
+static void vfio_device_release(struct device *dev)
+{
+ struct vfio_device *device =
+ container_of(dev, struct vfio_device, device);
+
+ vfio_release_device_set(device);
+ ida_free(&vfio.device_ida, device->index);
+
+ /*
+ * kvfree() cannot be done here due to a life cycle mess in
+ * vfio-ccw. Before the ccw part is fixed all drivers are
+ * required to support @release and call vfio_free_device()
+ * from there.
+ */
+ device->ops->release(device);
+}
+
+/*
+ * Allocate and initialize vfio_device so it can be registered to vfio
+ * core.
+ *
+ * Drivers should use the wrapper vfio_alloc_device() for allocation.
+ * @size is the size of the structure to be allocated, including any
+ * private data used by the driver.
+ *
+ * Driver may provide an @init callback to cover device private data.
+ *
+ * Use vfio_put_device() to release the structure after success return.
+ */
+struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
+ const struct vfio_device_ops *ops)
{
+ struct vfio_device *device;
+ int ret;
+
+ if (WARN_ON(size < sizeof(struct vfio_device)))
+ return ERR_PTR(-EINVAL);
+
+ device = kvzalloc(size, GFP_KERNEL);
+ if (!device)
+ return ERR_PTR(-ENOMEM);
+
+ ret = vfio_init_device(device, dev, ops);
+ if (ret)
+ goto out_free;
+ return device;
+
+out_free:
+ kvfree(device);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(_vfio_alloc_device);
+
+/*
+ * Initialize a vfio_device so it can be registered to vfio core.
+ *
+ * Only vfio-ccw driver should call this interface.
+ */
+int vfio_init_device(struct vfio_device *device, struct device *dev,
+ const struct vfio_device_ops *ops)
+{
+ int ret;
+
+ ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL);
+ if (ret < 0) {
+ dev_dbg(dev, "Failed to allocate device index\n");
+ return ret;
+ }
+
+ device->index = ret;
init_completion(&device->comp);
device->dev = dev;
device->ops = ops;
+
+ if (ops->init) {
+ ret = ops->init(device);
+ if (ret)
+ goto out_uninit;
+ }
+
+ device_initialize(&device->device);
+ device->device.release = vfio_device_release;
+ device->device.class = vfio.device_class;
+ device->device.parent = device->dev;
+ return 0;
+
+out_uninit:
+ vfio_release_device_set(device);
+ ida_free(&vfio.device_ida, device->index);
+ return ret;
}
-EXPORT_SYMBOL_GPL(vfio_init_group_dev);
+EXPORT_SYMBOL_GPL(vfio_init_device);
-void vfio_uninit_group_dev(struct vfio_device *device)
+/*
+ * The helper called by driver @release callback to free the device
+ * structure. Drivers which don't have private data to clean can
+ * simply use this helper as its @release.
+ */
+void vfio_free_device(struct vfio_device *device)
{
- vfio_release_device_set(device);
+ kvfree(device);
}
-EXPORT_SYMBOL_GPL(vfio_uninit_group_dev);
+EXPORT_SYMBOL_GPL(vfio_free_device);
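
With the allocation API above, a driver embeds struct vfio_device at the start of its private structure and lets the core size the allocation. A hedged caller sketch; my_vdev/my_ops/my_probe are hypothetical, and the (type, member, dev, ops) shape of the vfio_alloc_device() wrapper is assumed from the matching header change, which is not part of this hunk:

```c
#include <linux/vfio.h>

struct my_vdev {
	struct vfio_device vdev;	/* embedded vfio_device */
	void __iomem *regs;		/* driver-private state */
};

static const struct vfio_device_ops my_ops = {
	.name = "my-vfio-dev",
	.release = vfio_free_device,	/* nothing private to clean up */
	/* .init, .open_device, .close_device, ... */
};

static int my_probe(struct device *dev)
{
	struct my_vdev *mv;

	mv = vfio_alloc_device(my_vdev, vdev, dev, &my_ops);
	if (IS_ERR(mv))
		return PTR_ERR(mv);

	return vfio_register_group_dev(&mv->vdev);
}
```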
static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
enum vfio_group_type type)
@@ -535,8 +474,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
struct vfio_group *group;
iommu_group = iommu_group_get(dev);
-#ifdef CONFIG_VFIO_NOIOMMU
- if (!iommu_group && noiommu) {
+ if (!iommu_group && vfio_noiommu) {
/*
* With noiommu enabled, create an IOMMU group for devices that
* don't already have one, implying no IOMMU hardware/driver
@@ -550,7 +488,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
}
return group;
}
-#endif
+
if (!iommu_group)
return ERR_PTR(-EINVAL);
@@ -577,7 +515,12 @@ static int __vfio_register_dev(struct vfio_device *device,
struct vfio_group *group)
{
struct vfio_device *existing_device;
+ int ret;
+ /*
+ * In all cases group is the output of one of the group allocation
+ * functions and we have group->drivers incremented for us.
+ */
if (IS_ERR(group))
return PTR_ERR(group);
@@ -590,28 +533,39 @@ static int __vfio_register_dev(struct vfio_device *device,
existing_device = vfio_group_get_device(group, device->dev);
if (existing_device) {
+ /*
+ * group->iommu_group is non-NULL because we hold the drivers
+ * refcount.
+ */
dev_WARN(device->dev, "Device already exists on group %d\n",
iommu_group_id(group->iommu_group));
- vfio_device_put(existing_device);
- if (group->type == VFIO_NO_IOMMU ||
- group->type == VFIO_EMULATED_IOMMU)
- iommu_group_remove_device(device->dev);
- vfio_group_put(group);
- return -EBUSY;
+ vfio_device_put_registration(existing_device);
+ ret = -EBUSY;
+ goto err_out;
}
/* Our reference on group is moved to the device */
device->group = group;
+ ret = dev_set_name(&device->device, "vfio%d", device->index);
+ if (ret)
+ goto err_out;
+
+ ret = device_add(&device->device);
+ if (ret)
+ goto err_out;
+
/* Refcounting can't start until the driver calls register */
refcount_set(&device->refcount, 1);
mutex_lock(&group->device_lock);
list_add(&device->group_next, &group->device_list);
- group->dev_counter++;
mutex_unlock(&group->device_lock);
return 0;
+err_out:
+ vfio_device_remove_group(device);
+ return ret;
}
int vfio_register_group_dev(struct vfio_device *device)
@@ -651,7 +605,7 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
ret = !strcmp(dev_name(it->dev), buf);
}
- if (ret && vfio_device_try_get(it)) {
+ if (ret && vfio_device_try_get_registration(it)) {
device = it;
break;
}
@@ -671,7 +625,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
bool interrupted = false;
long rc;
- vfio_device_put(device);
+ vfio_device_put_registration(device);
rc = try_wait_for_completion(&device->comp);
while (rc <= 0) {
if (device->ops->request)
@@ -696,356 +650,78 @@ void vfio_unregister_group_dev(struct vfio_device *device)
mutex_lock(&group->device_lock);
list_del(&device->group_next);
- group->dev_counter--;
mutex_unlock(&group->device_lock);
- if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
- iommu_group_remove_device(device->dev);
+ /* Balances device_add in register path */
+ device_del(&device->device);
- /* Matches the get in vfio_register_group_dev() */
- vfio_group_put(group);
+ vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
/*
- * VFIO base fd, /dev/vfio/vfio
- */
-static long vfio_ioctl_check_extension(struct vfio_container *container,
- unsigned long arg)
-{
- struct vfio_iommu_driver *driver;
- long ret = 0;
-
- down_read(&container->group_lock);
-
- driver = container->iommu_driver;
-
- switch (arg) {
- /* No base extensions yet */
- default:
- /*
- * If no driver is set, poll all registered drivers for
- * extensions and return the first positive result. If
- * a driver is already set, further queries will be passed
- * only to that driver.
- */
- if (!driver) {
- mutex_lock(&vfio.iommu_drivers_lock);
- list_for_each_entry(driver, &vfio.iommu_drivers_list,
- vfio_next) {
-
- if (!list_empty(&container->group_list) &&
- !vfio_iommu_driver_allowed(container,
- driver))
- continue;
- if (!try_module_get(driver->ops->owner))
- continue;
-
- ret = driver->ops->ioctl(NULL,
- VFIO_CHECK_EXTENSION,
- arg);
- module_put(driver->ops->owner);
- if (ret > 0)
- break;
- }
- mutex_unlock(&vfio.iommu_drivers_lock);
- } else
- ret = driver->ops->ioctl(container->iommu_data,
- VFIO_CHECK_EXTENSION, arg);
- }
-
- up_read(&container->group_lock);
-
- return ret;
-}
-
-/* hold write lock on container->group_lock */
-static int __vfio_container_attach_groups(struct vfio_container *container,
- struct vfio_iommu_driver *driver,
- void *data)
-{
- struct vfio_group *group;
- int ret = -ENODEV;
-
- list_for_each_entry(group, &container->group_list, container_next) {
- ret = driver->ops->attach_group(data, group->iommu_group,
- group->type);
- if (ret)
- goto unwind;
- }
-
- return ret;
-
-unwind:
- list_for_each_entry_continue_reverse(group, &container->group_list,
- container_next) {
- driver->ops->detach_group(data, group->iommu_group);
- }
-
- return ret;
-}
-
-static long vfio_ioctl_set_iommu(struct vfio_container *container,
- unsigned long arg)
-{
- struct vfio_iommu_driver *driver;
- long ret = -ENODEV;
-
- down_write(&container->group_lock);
-
- /*
- * The container is designed to be an unprivileged interface while
- * the group can be assigned to specific users. Therefore, only by
- * adding a group to a container does the user get the privilege of
- * enabling the iommu, which may allocate finite resources. There
- * is no unset_iommu, but by removing all the groups from a container,
- * the container is deprivileged and returns to an unset state.
- */
- if (list_empty(&container->group_list) || container->iommu_driver) {
- up_write(&container->group_lock);
- return -EINVAL;
- }
-
- mutex_lock(&vfio.iommu_drivers_lock);
- list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
- void *data;
-
- if (!vfio_iommu_driver_allowed(container, driver))
- continue;
- if (!try_module_get(driver->ops->owner))
- continue;
-
- /*
- * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
- * so test which iommu driver reported support for this
- * extension and call open on them. We also pass them the
- * magic, allowing a single driver to support multiple
- * interfaces if they'd like.
- */
- if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
- module_put(driver->ops->owner);
- continue;
- }
-
- data = driver->ops->open(arg);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- module_put(driver->ops->owner);
- continue;
- }
-
- ret = __vfio_container_attach_groups(container, driver, data);
- if (ret) {
- driver->ops->release(data);
- module_put(driver->ops->owner);
- continue;
- }
-
- container->iommu_driver = driver;
- container->iommu_data = data;
- break;
- }
-
- mutex_unlock(&vfio.iommu_drivers_lock);
- up_write(&container->group_lock);
-
- return ret;
-}
-
-static long vfio_fops_unl_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver;
- void *data;
- long ret = -EINVAL;
-
- if (!container)
- return ret;
-
- switch (cmd) {
- case VFIO_GET_API_VERSION:
- ret = VFIO_API_VERSION;
- break;
- case VFIO_CHECK_EXTENSION:
- ret = vfio_ioctl_check_extension(container, arg);
- break;
- case VFIO_SET_IOMMU:
- ret = vfio_ioctl_set_iommu(container, arg);
- break;
- default:
- driver = container->iommu_driver;
- data = container->iommu_data;
-
- if (driver) /* passthrough all unrecognized ioctls */
- ret = driver->ops->ioctl(data, cmd, arg);
- }
-
- return ret;
-}
-
-static int vfio_fops_open(struct inode *inode, struct file *filep)
-{
- struct vfio_container *container;
-
- container = kzalloc(sizeof(*container), GFP_KERNEL);
- if (!container)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&container->group_list);
- init_rwsem(&container->group_lock);
- kref_init(&container->kref);
-
- filep->private_data = container;
-
- return 0;
-}
-
-static int vfio_fops_release(struct inode *inode, struct file *filep)
-{
- struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
-
- if (driver && driver->ops->notify)
- driver->ops->notify(container->iommu_data,
- VFIO_IOMMU_CONTAINER_CLOSE);
-
- filep->private_data = NULL;
-
- vfio_container_put(container);
-
- return 0;
-}
-
-static const struct file_operations vfio_fops = {
- .owner = THIS_MODULE,
- .open = vfio_fops_open,
- .release = vfio_fops_release,
- .unlocked_ioctl = vfio_fops_unl_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
-
-/*
* VFIO Group fd, /dev/vfio/$GROUP
*/
-static void __vfio_group_unset_container(struct vfio_group *group)
-{
- struct vfio_container *container = group->container;
- struct vfio_iommu_driver *driver;
-
- lockdep_assert_held_write(&group->group_rwsem);
-
- down_write(&container->group_lock);
-
- driver = container->iommu_driver;
- if (driver)
- driver->ops->detach_group(container->iommu_data,
- group->iommu_group);
-
- if (group->type == VFIO_IOMMU)
- iommu_group_release_dma_owner(group->iommu_group);
-
- group->container = NULL;
- group->container_users = 0;
- list_del(&group->container_next);
-
- /* Detaching the last group deprivileges a container, remove iommu */
- if (driver && list_empty(&container->group_list)) {
- driver->ops->release(container->iommu_data);
- module_put(driver->ops->owner);
- container->iommu_driver = NULL;
- container->iommu_data = NULL;
- }
-
- up_write(&container->group_lock);
-
- vfio_container_put(container);
-}
-
/*
* VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset. Since the ioctl is called on
 * the group, we know the group still exists; therefore the only valid
* transition here is 1->0.
*/
-static int vfio_group_unset_container(struct vfio_group *group)
+static int vfio_group_ioctl_unset_container(struct vfio_group *group)
{
- lockdep_assert_held_write(&group->group_rwsem);
+ int ret = 0;
- if (!group->container)
- return -EINVAL;
- if (group->container_users != 1)
- return -EBUSY;
- __vfio_group_unset_container(group);
- return 0;
+ mutex_lock(&group->group_lock);
+ if (!group->container) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (group->container_users != 1) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ vfio_group_detach_container(group);
+
+out_unlock:
+ mutex_unlock(&group->group_lock);
+ return ret;
}
-static int vfio_group_set_container(struct vfio_group *group, int container_fd)
+static int vfio_group_ioctl_set_container(struct vfio_group *group,
+ int __user *arg)
{
- struct fd f;
struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret = 0;
-
- lockdep_assert_held_write(&group->group_rwsem);
-
- if (group->container || WARN_ON(group->container_users))
- return -EINVAL;
+ struct fd f;
+ int ret;
+ int fd;
- if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
- return -EPERM;
+ if (get_user(fd, arg))
+ return -EFAULT;
- f = fdget(container_fd);
+ f = fdget(fd);
if (!f.file)
return -EBADF;
- /* Sanity check, is this really our fd? */
- if (f.file->f_op != &vfio_fops) {
- fdput(f);
- return -EINVAL;
+ mutex_lock(&group->group_lock);
+ if (group->container || WARN_ON(group->container_users)) {
+ ret = -EINVAL;
+ goto out_unlock;
}
-
- container = f.file->private_data;
- WARN_ON(!container); /* fget ensures we don't race vfio_release */
-
- down_write(&container->group_lock);
-
- /* Real groups and fake groups cannot mix */
- if (!list_empty(&container->group_list) &&
- container->noiommu != (group->type == VFIO_NO_IOMMU)) {
- ret = -EPERM;
- goto unlock_out;
- }
-
- if (group->type == VFIO_IOMMU) {
- ret = iommu_group_claim_dma_owner(group->iommu_group, f.file);
- if (ret)
- goto unlock_out;
+ if (!group->iommu_group) {
+ ret = -ENODEV;
+ goto out_unlock;
}
- driver = container->iommu_driver;
- if (driver) {
- ret = driver->ops->attach_group(container->iommu_data,
- group->iommu_group,
- group->type);
- if (ret) {
- if (group->type == VFIO_IOMMU)
- iommu_group_release_dma_owner(
- group->iommu_group);
- goto unlock_out;
- }
+ container = vfio_container_from_file(f.file);
+ ret = -EINVAL;
+ if (container) {
+ ret = vfio_container_attach_group(container, group);
+ goto out_unlock;
}
- group->container = container;
- group->container_users = 1;
- container->noiommu = (group->type == VFIO_NO_IOMMU);
- list_add(&group->container_next, &container->group_list);
-
- /* Get a reference on the container and mark a user within the group */
- vfio_container_get(container);
-
-unlock_out:
- up_write(&container->group_lock);
+out_unlock:
+ mutex_unlock(&group->group_lock);
fdput(f);
return ret;
}
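
The rewritten SET_CONTAINER handler above shows the standard shape for an ioctl that consumes a file descriptor: copy the fd with get_user(), pin the struct file with fdget(), validate it, and balance with fdput() on every exit path. Reduced to its skeleton (demo_* names are hypothetical):

```c
#include <linux/file.h>
#include <linux/uaccess.h>

static int demo_ioctl_take_fd(int __user *arg)
{
	struct fd f;
	int fd, ret;

	if (get_user(fd, arg))
		return -EFAULT;

	f = fdget(fd);			/* pins f.file until fdput() */
	if (!f.file)
		return -EBADF;

	ret = 0;			/* ... validate and use f.file ... */

	fdput(f);			/* must balance on all paths */
	return ret;
}
```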
@@ -1053,47 +729,19 @@ unlock_out:
static const struct file_operations vfio_device_fops;
/* true if the vfio_device has open_device() called but not close_device() */
-static bool vfio_assert_device_open(struct vfio_device *device)
+bool vfio_assert_device_open(struct vfio_device *device)
{
return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}
-static int vfio_device_assign_container(struct vfio_device *device)
-{
- struct vfio_group *group = device->group;
-
- lockdep_assert_held_write(&group->group_rwsem);
-
- if (!group->container || !group->container->iommu_driver ||
- WARN_ON(!group->container_users))
- return -EINVAL;
-
- if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- get_file(group->opened_file);
- group->container_users++;
- return 0;
-}
-
-static void vfio_device_unassign_container(struct vfio_device *device)
-{
- down_write(&device->group->group_rwsem);
- WARN_ON(device->group->container_users <= 1);
- device->group->container_users--;
- fput(device->group->opened_file);
- up_write(&device->group->group_rwsem);
-}
-
static struct file *vfio_device_open(struct vfio_device *device)
{
- struct vfio_iommu_driver *iommu_driver;
struct file *filep;
int ret;
- down_write(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
ret = vfio_device_assign_container(device);
- up_write(&device->group->group_rwsem);
+ mutex_unlock(&device->group->group_lock);
if (ret)
return ERR_PTR(ret);
@@ -1110,7 +758,7 @@ static struct file *vfio_device_open(struct vfio_device *device)
* lock. If the device driver will use it, it must obtain a
* reference and release it during close_device.
*/
- down_read(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
device->kvm = device->group->kvm;
if (device->ops->open_device) {
@@ -1118,13 +766,8 @@ static struct file *vfio_device_open(struct vfio_device *device)
if (ret)
goto err_undo_count;
}
-
- iommu_driver = device->group->container->iommu_driver;
- if (iommu_driver && iommu_driver->ops->register_device)
- iommu_driver->ops->register_device(
- device->group->container->iommu_data, device);
-
- up_read(&device->group->group_rwsem);
+ vfio_device_container_register(device);
+ mutex_unlock(&device->group->group_lock);
}
mutex_unlock(&device->dev_set->lock);
@@ -1157,17 +800,14 @@ static struct file *vfio_device_open(struct vfio_device *device)
err_close_device:
mutex_lock(&device->dev_set->lock);
- down_read(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
if (device->open_count == 1 && device->ops->close_device) {
device->ops->close_device(device);
- iommu_driver = device->group->container->iommu_driver;
- if (iommu_driver && iommu_driver->ops->unregister_device)
- iommu_driver->ops->unregister_device(
- device->group->container->iommu_data, device);
+ vfio_device_container_unregister(device);
}
err_undo_count:
- up_read(&device->group->group_rwsem);
+ mutex_unlock(&device->group->group_lock);
device->open_count--;
if (device->open_count == 0 && device->kvm)
device->kvm = NULL;
@@ -1178,14 +818,21 @@ err_unassign_container:
return ERR_PTR(ret);
}
-static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
+static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
+ char __user *arg)
{
struct vfio_device *device;
struct file *filep;
+ char *buf;
int fdno;
int ret;
+ buf = strndup_user(arg, PAGE_SIZE);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
device = vfio_device_get_from_name(group, buf);
+ kfree(buf);
if (IS_ERR(device))
return PTR_ERR(device);
@@ -1207,81 +854,60 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
err_put_fdno:
put_unused_fd(fdno);
err_put_device:
- vfio_device_put(device);
+ vfio_device_put_registration(device);
return ret;
}
-static long vfio_group_fops_unl_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
+static int vfio_group_ioctl_get_status(struct vfio_group *group,
+ struct vfio_group_status __user *arg)
{
- struct vfio_group *group = filep->private_data;
- long ret = -ENOTTY;
+ unsigned long minsz = offsetofend(struct vfio_group_status, flags);
+ struct vfio_group_status status;
- switch (cmd) {
- case VFIO_GROUP_GET_STATUS:
- {
- struct vfio_group_status status;
- unsigned long minsz;
+ if (copy_from_user(&status, arg, minsz))
+ return -EFAULT;
- minsz = offsetofend(struct vfio_group_status, flags);
+ if (status.argsz < minsz)
+ return -EINVAL;
- if (copy_from_user(&status, (void __user *)arg, minsz))
- return -EFAULT;
+ status.flags = 0;
- if (status.argsz < minsz)
- return -EINVAL;
+ mutex_lock(&group->group_lock);
+ if (!group->iommu_group) {
+ mutex_unlock(&group->group_lock);
+ return -ENODEV;
+ }
- status.flags = 0;
+ if (group->container)
+ status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
+ VFIO_GROUP_FLAGS_VIABLE;
+ else if (!iommu_group_dma_owner_claimed(group->iommu_group))
+ status.flags |= VFIO_GROUP_FLAGS_VIABLE;
+ mutex_unlock(&group->group_lock);
- down_read(&group->group_rwsem);
- if (group->container)
- status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
- VFIO_GROUP_FLAGS_VIABLE;
- else if (!iommu_group_dma_owner_claimed(group->iommu_group))
- status.flags |= VFIO_GROUP_FLAGS_VIABLE;
- up_read(&group->group_rwsem);
+ if (copy_to_user(arg, &status, minsz))
+ return -EFAULT;
+ return 0;
+}
- if (copy_to_user((void __user *)arg, &status, minsz))
- return -EFAULT;
+static long vfio_group_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_group *group = filep->private_data;
+ void __user *uarg = (void __user *)arg;
- ret = 0;
- break;
- }
+ switch (cmd) {
+ case VFIO_GROUP_GET_DEVICE_FD:
+ return vfio_group_ioctl_get_device_fd(group, uarg);
+ case VFIO_GROUP_GET_STATUS:
+ return vfio_group_ioctl_get_status(group, uarg);
case VFIO_GROUP_SET_CONTAINER:
- {
- int fd;
-
- if (get_user(fd, (int __user *)arg))
- return -EFAULT;
-
- if (fd < 0)
- return -EINVAL;
-
- down_write(&group->group_rwsem);
- ret = vfio_group_set_container(group, fd);
- up_write(&group->group_rwsem);
- break;
- }
+ return vfio_group_ioctl_set_container(group, uarg);
case VFIO_GROUP_UNSET_CONTAINER:
- down_write(&group->group_rwsem);
- ret = vfio_group_unset_container(group);
- up_write(&group->group_rwsem);
- break;
- case VFIO_GROUP_GET_DEVICE_FD:
- {
- char *buf;
-
- buf = strndup_user((const char __user *)arg, PAGE_SIZE);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = vfio_group_get_device_fd(group, buf);
- kfree(buf);
- break;
- }
+ return vfio_group_ioctl_unset_container(group);
+ default:
+ return -ENOTTY;
}
-
- return ret;
}
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
@@ -1290,17 +916,20 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
container_of(inode->i_cdev, struct vfio_group, cdev);
int ret;
- down_write(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
- /* users can be zero if this races with vfio_group_put() */
- if (!refcount_inc_not_zero(&group->users)) {
+ /*
+ * drivers can be zero if this races with vfio_device_remove_group();
+ * it will remain stable at 0 under the group_lock.
+ */
+ if (refcount_read(&group->drivers) == 0) {
ret = -ENODEV;
- goto err_unlock;
+ goto out_unlock;
}
if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
ret = -EPERM;
- goto err_put;
+ goto out_unlock;
}
/*
@@ -1308,17 +937,13 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
*/
if (group->opened_file) {
ret = -EBUSY;
- goto err_put;
+ goto out_unlock;
}
group->opened_file = filep;
filep->private_data = group;
-
- up_write(&group->group_rwsem);
- return 0;
-err_put:
- vfio_group_put(group);
-err_unlock:
- up_write(&group->group_rwsem);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&group->group_lock);
return ret;
}
@@ -1328,21 +953,16 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
filep->private_data = NULL;
- down_write(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
/*
* Device FDs hold a group file reference, therefore the group release
* is only called when there are no open devices.
*/
WARN_ON(group->notifier.head);
- if (group->container) {
- WARN_ON(group->container_users != 1);
- __vfio_group_unset_container(group);
- }
+ if (group->container)
+ vfio_group_detach_container(group);
group->opened_file = NULL;
- up_write(&group->group_rwsem);
-
- vfio_group_put(group);
-
+ mutex_unlock(&group->group_lock);
return 0;
}
@@ -1355,24 +975,53 @@ static const struct file_operations vfio_group_fops = {
};
/*
+ * Wrapper around pm_runtime_resume_and_get().
+ * Return error code on failure or 0 on success.
+ */
+static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
+{
+ struct device *dev = device->dev;
+
+ if (dev->driver && dev->driver->pm) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_info_ratelimited(dev,
+ "vfio: runtime resume failed %d\n", ret);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Wrapper around pm_runtime_put().
+ */
+static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
+{
+ struct device *dev = device->dev;
+
+ if (dev->driver && dev->driver->pm)
+ pm_runtime_put(dev);
+}
+
+/*
* VFIO Device fd
*/
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_device *device = filep->private_data;
- struct vfio_iommu_driver *iommu_driver;
mutex_lock(&device->dev_set->lock);
vfio_assert_device_open(device);
- down_read(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
- iommu_driver = device->group->container->iommu_driver;
- if (iommu_driver && iommu_driver->ops->unregister_device)
- iommu_driver->ops->unregister_device(
- device->group->container->iommu_data, device);
- up_read(&device->group->group_rwsem);
+ vfio_device_container_unregister(device);
+ mutex_unlock(&device->group->group_lock);
device->open_count--;
if (device->open_count == 0)
device->kvm = NULL;
@@ -1382,7 +1031,7 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
vfio_device_unassign_container(device);
- vfio_device_put(device);
+ vfio_device_put_registration(device);
return 0;
}
@@ -1628,6 +1277,167 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
return 0;
}
+/* Ranges should fit into a single kernel page */
+#define LOG_MAX_RANGES \
+ (PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))
+
+static int
+vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ size_t minsz =
+ offsetofend(struct vfio_device_feature_dma_logging_control,
+ ranges);
+ struct vfio_device_feature_dma_logging_range __user *ranges;
+ struct vfio_device_feature_dma_logging_control control;
+ struct vfio_device_feature_dma_logging_range range;
+ struct rb_root_cached root = RB_ROOT_CACHED;
+ struct interval_tree_node *nodes;
+ u64 iova_end;
+ u32 nnodes;
+ int i, ret;
+
+ if (!device->log_ops)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz,
+ VFIO_DEVICE_FEATURE_SET,
+ sizeof(control));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&control, arg, minsz))
+ return -EFAULT;
+
+ nnodes = control.num_ranges;
+ if (!nnodes)
+ return -EINVAL;
+
+ if (nnodes > LOG_MAX_RANGES)
+ return -E2BIG;
+
+ ranges = u64_to_user_ptr(control.ranges);
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ for (i = 0; i < nnodes; i++) {
+ if (copy_from_user(&range, &ranges[i], sizeof(range))) {
+ ret = -EFAULT;
+ goto end;
+ }
+ if (!IS_ALIGNED(range.iova, control.page_size) ||
+ !IS_ALIGNED(range.length, control.page_size)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (check_add_overflow(range.iova, range.length, &iova_end) ||
+ iova_end > ULONG_MAX) {
+ ret = -EOVERFLOW;
+ goto end;
+ }
+
+ nodes[i].start = range.iova;
+ nodes[i].last = range.iova + range.length - 1;
+ if (interval_tree_iter_first(&root, nodes[i].start,
+ nodes[i].last)) {
+ /* Range overlapping */
+ ret = -EINVAL;
+ goto end;
+ }
+ interval_tree_insert(nodes + i, &root);
+ }
+
+ ret = device->log_ops->log_start(device, &root, nnodes,
+ &control.page_size);
+ if (ret)
+ goto end;
+
+ if (copy_to_user(arg, &control, sizeof(control))) {
+ ret = -EFAULT;
+ device->log_ops->log_stop(device);
+ }
+
+end:
+ kfree(nodes);
+ return ret;
+}
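
logging_start rejects overlapping ranges by probing the interval tree before each insert: interval_tree_iter_first() over [start, last] returns non-NULL iff some existing node intersects that span. The check in isolation, assuming the same <linux/interval_tree.h> API (demo_* is hypothetical):

```c
#include <linux/interval_tree.h>
#include <linux/errno.h>

/* Insert [start, last] only if it does not overlap an existing node. */
static int demo_insert_unique(struct rb_root_cached *root,
			      struct interval_tree_node *node,
			      unsigned long start, unsigned long last)
{
	if (interval_tree_iter_first(root, start, last))
		return -EINVAL;		/* overlaps an existing range */

	node->start = start;
	node->last = last;
	interval_tree_insert(node, root);
	return 0;
}
```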
+
+static int
+vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ int ret;
+
+ if (!device->log_ops)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz,
+ VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ return device->log_ops->log_stop(device);
+}
+
+static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
+ unsigned long iova, size_t length,
+ void *opaque)
+{
+ struct vfio_device *device = opaque;
+
+ return device->log_ops->log_read_and_clear(device, iova, length, iter);
+}
+
+static int
+vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ size_t minsz =
+ offsetofend(struct vfio_device_feature_dma_logging_report,
+ bitmap);
+ struct vfio_device_feature_dma_logging_report report;
+ struct iova_bitmap *iter;
+ u64 iova_end;
+ int ret;
+
+ if (!device->log_ops)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz,
+ VFIO_DEVICE_FEATURE_GET,
+ sizeof(report));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&report, arg, minsz))
+ return -EFAULT;
+
+ if (report.page_size < SZ_4K || !is_power_of_2(report.page_size))
+ return -EINVAL;
+
+ if (check_add_overflow(report.iova, report.length, &iova_end) ||
+ iova_end > ULONG_MAX)
+ return -EOVERFLOW;
+
+ iter = iova_bitmap_alloc(report.iova, report.length,
+ report.page_size,
+ u64_to_user_ptr(report.bitmap));
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = iova_bitmap_for_each(iter, device,
+ vfio_device_log_read_and_clear);
+
+ iova_bitmap_free(iter);
+ return ret;
+}
+
static int vfio_ioctl_device_feature(struct vfio_device *device,
struct vfio_device_feature __user *arg)
{
@@ -1661,6 +1471,18 @@ static int vfio_ioctl_device_feature(struct vfio_device *device,
return vfio_ioctl_device_feature_mig_device_state(
device, feature.flags, arg->data,
feature.argsz - minsz);
+ case VFIO_DEVICE_FEATURE_DMA_LOGGING_START:
+ return vfio_ioctl_device_feature_logging_start(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
+ case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP:
+ return vfio_ioctl_device_feature_logging_stop(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
+ case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT:
+ return vfio_ioctl_device_feature_logging_report(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
default:
if (unlikely(!device->ops->device_feature))
return -EINVAL;
@@ -1674,15 +1496,27 @@ static long vfio_device_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
struct vfio_device *device = filep->private_data;
+ int ret;
+
+ ret = vfio_device_pm_runtime_get(device);
+ if (ret)
+ return ret;
switch (cmd) {
case VFIO_DEVICE_FEATURE:
- return vfio_ioctl_device_feature(device, (void __user *)arg);
+ ret = vfio_ioctl_device_feature(device, (void __user *)arg);
+ break;
+
default:
if (unlikely(!device->ops->ioctl))
- return -EINVAL;
- return device->ops->ioctl(device, cmd, arg);
+ ret = -EINVAL;
+ else
+ ret = device->ops->ioctl(device, cmd, arg);
+ break;
}
+
+ vfio_device_pm_runtime_put(device);
+ return ret;
}
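
Every device ioctl is now bracketed by the runtime-PM wrappers, so a runtime-suspended device is resumed before its driver sees the command. The general idiom the hunk applies, sketched with a hypothetical op callback:

```c
#include <linux/pm_runtime.h>

/* Run one operation with the device guaranteed resumed. */
static int demo_do_op(struct device *dev, int (*op)(struct device *))
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* usage ref + resume */
	if (ret)
		return ret;			/* no ref held on failure */

	ret = op(dev);

	pm_runtime_put(dev);			/* drop ref, allow suspend */
	return ret;
}
```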
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
@@ -1732,19 +1566,42 @@ static const struct file_operations vfio_device_fops = {
* vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
* @file: VFIO group file
*
- * The returned iommu_group is valid as long as a ref is held on the file.
+ * The returned iommu_group is valid as long as a ref is held on the file. This
+ * returns a reference on the group. This function is deprecated, only the SPAPR
+ * path in kvm should call it.
*/
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
struct vfio_group *group = file->private_data;
+ struct iommu_group *iommu_group = NULL;
- if (file->f_op != &vfio_group_fops)
+ if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
return NULL;
- return group->iommu_group;
+
+ if (!vfio_file_is_group(file))
+ return NULL;
+
+ mutex_lock(&group->group_lock);
+ if (group->iommu_group) {
+ iommu_group = group->iommu_group;
+ iommu_group_ref_get(iommu_group);
+ }
+ mutex_unlock(&group->group_lock);
+ return iommu_group;
}
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
/**
+ * vfio_file_is_group - True if the file is usable with VFIO APIs
+ * @file: VFIO group file
+ */
+bool vfio_file_is_group(struct file *file)
+{
+ return file->f_op == &vfio_group_fops;
+}
+EXPORT_SYMBOL_GPL(vfio_file_is_group);
+
+/**
* vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
* is always CPU cache coherent
* @file: VFIO group file
@@ -1758,13 +1615,13 @@ bool vfio_file_enforced_coherent(struct file *file)
struct vfio_group *group = file->private_data;
bool ret;
- if (file->f_op != &vfio_group_fops)
+ if (!vfio_file_is_group(file))
return true;
- down_read(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
if (group->container) {
- ret = vfio_ioctl_check_extension(group->container,
- VFIO_DMA_CC_IOMMU);
+ ret = vfio_container_ioctl_check_extension(group->container,
+ VFIO_DMA_CC_IOMMU);
} else {
/*
* Since the coherency state is determined only once a container
@@ -1773,7 +1630,7 @@ bool vfio_file_enforced_coherent(struct file *file)
*/
ret = true;
}
- up_read(&group->group_rwsem);
+ mutex_unlock(&group->group_lock);
return ret;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
@@ -1790,12 +1647,12 @@ void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
struct vfio_group *group = file->private_data;
- if (file->f_op != &vfio_group_fops)
+ if (!vfio_file_is_group(file))
return;
- down_write(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
group->kvm = kvm;
- up_write(&group->group_rwsem);
+ mutex_unlock(&group->group_lock);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
@@ -1810,7 +1667,7 @@ bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
struct vfio_group *group = file->private_data;
- if (file->f_op != &vfio_group_fops)
+ if (!vfio_file_is_group(file))
return false;
return group == device->group;
@@ -1937,114 +1794,6 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
- * Pin contiguous user pages and return their associated host pages for local
- * domain only.
- * @device [in] : device
- * @iova [in] : starting IOVA of user pages to be pinned.
- * @npage [in] : count of pages to be pinned. This count should not
- * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * @prot [in] : protection flags
- * @pages[out] : array of host pages
- * Return error or number of pages pinned.
- */
-int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
- int npage, int prot, struct page **pages)
-{
- struct vfio_container *container;
- struct vfio_group *group = device->group;
- struct vfio_iommu_driver *driver;
- int ret;
-
- if (!pages || !npage || !vfio_assert_device_open(device))
- return -EINVAL;
-
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
-
- if (group->dev_counter > 1)
- return -EINVAL;
-
- /* group->container cannot change while a vfio device is open */
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->pin_pages))
- ret = driver->ops->pin_pages(container->iommu_data,
- group->iommu_group, iova,
- npage, prot, pages);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-EXPORT_SYMBOL(vfio_pin_pages);
-
-/*
- * Unpin contiguous host pages for local domain only.
- * @device [in] : device
- * @iova [in] : starting address of user pages to be unpinned.
- * @npage [in] : count of pages to be unpinned. This count should not
- * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- */
-void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
-
- if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
- return;
-
- if (WARN_ON(!vfio_assert_device_open(device)))
- return;
-
- /* group->container cannot change while a vfio device is open */
- container = device->group->container;
- driver = container->iommu_driver;
-
- driver->ops->unpin_pages(container->iommu_data, iova, npage);
-}
-EXPORT_SYMBOL(vfio_unpin_pages);
-
-/*
- * This interface allows the CPUs to perform some sort of virtual DMA on
- * behalf of the device.
- *
- * CPUs read/write from/into a range of IOVAs pointing to user space memory
- * into/from a kernel buffer.
- *
- * As the read/write of user space memory is conducted via the CPUs and is
- * not a real device DMA, it is not necessary to pin the user space memory.
- *
- * @device [in] : VFIO device
- * @iova [in] : base IOVA of a user space buffer
- * @data [in] : pointer to kernel buffer
- * @len [in] : kernel buffer length
- * @write : indicate read or write
- * Return error code on failure or 0 on success.
- */
-int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
- size_t len, bool write)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret = 0;
-
- if (!data || len <= 0 || !vfio_assert_device_open(device))
- return -EINVAL;
-
- /* group->container cannot change while a vfio device is open */
- container = device->group->container;
- driver = container->iommu_driver;
-
- if (likely(driver && driver->ops->dma_rw))
- ret = driver->ops->dma_rw(container->iommu_data,
- iova, data, len, write);
- else
- ret = -ENOTTY;
- return ret;
-}
-EXPORT_SYMBOL(vfio_dma_rw);
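
These pin/unpin and dma_rw entry points leave vfio_main.c as part of the container split; the caller-facing contract is unchanged. A hedged caller sketch (demo_read_desc is hypothetical): with write=false, vfio_dma_rw() copies from the IOVA range into the kernel buffer via the CPU, so nothing needs to be pinned:

```c
#include <linux/vfio.h>

/* Read a guest-visible structure at @iova without pinning pages. */
static int demo_read_desc(struct vfio_device *vdev, dma_addr_t iova,
			  void *buf, size_t len)
{
	return vfio_dma_rw(vdev, iova, buf, len, false);
}
```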
-
-/*
* Module/class support
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
@@ -2052,59 +1801,50 @@ static char *vfio_devnode(struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
-static struct miscdevice vfio_dev = {
- .minor = VFIO_MINOR,
- .name = "vfio",
- .fops = &vfio_fops,
- .nodename = "vfio/vfio",
- .mode = S_IRUGO | S_IWUGO,
-};
-
static int __init vfio_init(void)
{
int ret;
ida_init(&vfio.group_ida);
+ ida_init(&vfio.device_ida);
mutex_init(&vfio.group_lock);
- mutex_init(&vfio.iommu_drivers_lock);
INIT_LIST_HEAD(&vfio.group_list);
- INIT_LIST_HEAD(&vfio.iommu_drivers_list);
- ret = misc_register(&vfio_dev);
- if (ret) {
- pr_err("vfio: misc device register failed\n");
+ ret = vfio_container_init();
+ if (ret)
return ret;
- }
/* /dev/vfio/$GROUP */
vfio.class = class_create(THIS_MODULE, "vfio");
if (IS_ERR(vfio.class)) {
ret = PTR_ERR(vfio.class);
- goto err_class;
+ goto err_group_class;
}
vfio.class->devnode = vfio_devnode;
+ /* /sys/class/vfio-dev/vfioX */
+ vfio.device_class = class_create(THIS_MODULE, "vfio-dev");
+ if (IS_ERR(vfio.device_class)) {
+ ret = PTR_ERR(vfio.device_class);
+ goto err_dev_class;
+ }
+
ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
if (ret)
goto err_alloc_chrdev;
-#ifdef CONFIG_VFIO_NOIOMMU
- ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
-#endif
- if (ret)
- goto err_driver_register;
-
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
-err_driver_register:
- unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
+ class_destroy(vfio.device_class);
+ vfio.device_class = NULL;
+err_dev_class:
class_destroy(vfio.class);
vfio.class = NULL;
-err_class:
- misc_deregister(&vfio_dev);
+err_group_class:
+ vfio_container_cleanup();
return ret;
}
@@ -2112,14 +1852,14 @@ static void __exit vfio_cleanup(void)
{
WARN_ON(!list_empty(&vfio.group_list));
-#ifdef CONFIG_VFIO_NOIOMMU
- vfio_unregister_iommu_driver(&vfio_noiommu_ops);
-#endif
+ ida_destroy(&vfio.device_ida);
ida_destroy(&vfio.group_ida);
unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
+ class_destroy(vfio.device_class);
+ vfio.device_class = NULL;
class_destroy(vfio.class);
+ vfio_container_cleanup();
vfio.class = NULL;
- misc_deregister(&vfio_dev);
xa_destroy(&vfio_device_set_xa);
}
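
vfio_init() now creates two classes and a chrdev region and unwinds them in strict reverse order on failure. The skeleton of that init/unwind pairing, with hypothetical demo names (class_create() is called with the two-argument signature this patch itself uses):

```c
#include <linux/module.h>
#include <linux/device.h>
#include <linux/fs.h>

static struct class *demo_class;
static dev_t demo_devt;

static int __init demo_init(void)
{
	int ret;

	demo_class = class_create(THIS_MODULE, "demo");
	if (IS_ERR(demo_class))
		return PTR_ERR(demo_class);

	ret = alloc_chrdev_region(&demo_devt, 0, MINORMASK + 1, "demo");
	if (ret)
		goto err_class;

	return 0;

err_class:
	class_destroy(demo_class);	/* undo in reverse order */
	demo_class = NULL;
	return ret;
}
```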
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d7a04d573988..20265393aee7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1782,7 +1782,7 @@ static struct miscdevice vhost_net_misc = {
.fops = &vhost_net_fops,
};
-static int vhost_net_init(void)
+static int __init vhost_net_init(void)
{
if (experimental_zcopytx)
vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
@@ -1790,7 +1790,7 @@ static int vhost_net_init(void)
}
module_init(vhost_net_init);
-static void vhost_net_exit(void)
+static void __exit vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
}
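
The vhost hunk only adds the missing section annotations: __init text is freed once a module finishes loading, and __exit text is omitted entirely when the driver is built in. The canonical shape:

```c
#include <linux/module.h>

static int __init demo_init(void)	/* discarded after a successful load */
{
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)	/* dropped when built-in */
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
```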
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index a317d9fe1d67..5f8fec9e5fd4 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -318,14 +318,6 @@ struct dac_info
void *data;
};
-
-static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
-{
- u8 code[2] = {reg, 0};
- info->dac_read_regs(info->data, code, 1);
- return code[1];
-}
-
static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
{
info->dac_read_regs(info->data, code, count);
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index aba46118b208..6bbcd9fc864e 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -108,13 +108,6 @@ static inline int PAR_EQUAL(struct fb_par_control *x, struct fb_par_control *y)
return (!DIRTY(cmode) && !DIRTY(xres) && !DIRTY(yres)
&& !DIRTY(vxres) && !DIRTY(vyres));
}
-static inline int VAR_MATCH(struct fb_var_screeninfo *x, struct fb_var_screeninfo *y)
-{
- return (!DIRTY(bits_per_pixel) && !DIRTY(xres)
- && !DIRTY(yres) && !DIRTY(xres_virtual)
- && !DIRTY(yres_virtual)
- && !DIRTY_CMAP(red) && !DIRTY_CMAP(green) && !DIRTY_CMAP(blue));
-}
struct fb_info_control {
struct fb_info info;
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 6b4d5a7f3e15..1582c718329c 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1072,17 +1072,12 @@ static ssize_t gbefb_show_rev(struct device *device, struct device_attribute *at
static DEVICE_ATTR(revision, S_IRUGO, gbefb_show_rev, NULL);
-static void gbefb_remove_sysfs(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_size);
- device_remove_file(dev, &dev_attr_revision);
-}
-
-static void gbefb_create_sysfs(struct device *dev)
-{
- device_create_file(dev, &dev_attr_size);
- device_create_file(dev, &dev_attr_revision);
-}
+static struct attribute *gbefb_attrs[] = {
+ &dev_attr_size.attr,
+ &dev_attr_revision.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gbefb);
/*
* Initialization
@@ -1221,7 +1216,6 @@ static int gbefb_probe(struct platform_device *p_dev)
}
platform_set_drvdata(p_dev, info);
- gbefb_create_sysfs(&p_dev->dev);
fb_info(info, "%s rev %d @ 0x%08x using %dkB memory\n",
info->fix.id, gbe_revision, (unsigned)GBE_BASE,
@@ -1248,7 +1242,6 @@ static int gbefb_remove(struct platform_device* p_dev)
gbe_turn_off();
arch_phys_wc_del(par->wc_cookie);
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
- gbefb_remove_sysfs(&p_dev->dev);
framebuffer_release(info);
return 0;
@@ -1259,6 +1252,7 @@ static struct platform_driver gbefb_driver = {
.remove = gbefb_remove,
.driver = {
.name = "gbefb",
+ .dev_groups = gbefb_groups,
},
};
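
The gbefb conversion replaces hand-rolled device_create_file()/device_remove_file() pairs with a static attribute group wired through .dev_groups, so the driver core creates and removes the sysfs files itself and probe error paths cannot leak them. The pattern in miniature (demo names are hypothetical; assumes a size_show() exists for DEVICE_ATTR_RO):

```c
#include <linux/device.h>
#include <linux/platform_device.h>

static DEVICE_ATTR_RO(size);		/* assumes size_show() is defined */

static struct attribute *demo_attrs[] = {
	&dev_attr_size.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);			/* generates demo_groups */

static struct platform_driver demo_driver = {
	.driver = {
		.name		= "demo",
		.dev_groups	= demo_groups,	/* core manages the files */
	},
};
```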
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 94f3bc637fc8..51fde1b2a793 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -972,7 +972,6 @@ static int imxfb_probe(struct platform_device *pdev)
fbi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(fbi->regs)) {
- dev_err(&pdev->dev, "Cannot map frame buffer registers\n");
ret = PTR_ERR(fbi->regs);
goto failed_ioremap;
}
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 96800c9c9cd9..90c79e8c1157 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -693,7 +693,7 @@ static int of_platform_mb862xx_probe(struct platform_device *ofdev)
par->dev = dev;
par->irq = irq_of_parse_and_map(np, 0);
- if (par->irq == NO_IRQ) {
+ if (!par->irq) {
dev_err(dev, "failed to map irq\n");
ret = -ENODEV;
goto fbrel;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index b2d6e6df2161..92fb6b7e1f68 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -519,11 +519,9 @@ int dispc_runtime_get(void)
DSSDBG("dispc_runtime_get\n");
- r = pm_runtime_get_sync(&dispc.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&dispc.pdev->dev);
+ r = pm_runtime_resume_and_get(&dispc.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
EXPORT_SYMBOL(dispc_runtime_get);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d43b081d592f..54b0f034c2ed 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1136,11 +1136,9 @@ static int dsi_runtime_get(struct platform_device *dsidev)
DSSDBG("dsi_runtime_get\n");
- r = pm_runtime_get_sync(&dsi->pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&dsi->pdev->dev);
+ r = pm_runtime_resume_and_get(&dsi->pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 45b9d3cf3860..335e0af4eec1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -767,11 +767,9 @@ int dss_runtime_get(void)
DSSDBG("dss_runtime_get\n");
- r = pm_runtime_get_sync(&dss.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&dss.pdev->dev);
+ r = pm_runtime_resume_and_get(&dss.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 800bd108e834..0f39612e002e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -38,11 +38,9 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_resume_and_get(&hdmi.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index 2c03608addcd..bfccc2cb917a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -42,11 +42,9 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_resume_and_get(&hdmi.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 905d642ff9ed..78a7309d25dd 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -347,11 +347,9 @@ static int venc_runtime_get(void)
DSSDBG("venc_runtime_get\n");
- r = pm_runtime_get_sync(&venc.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&venc.pdev->dev);
+ r = pm_runtime_resume_and_get(&venc.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index d7aa5511c361..e65bdc499c23 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -137,6 +137,8 @@ static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len);
static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size);
static void ufx_free_urb_list(struct ufx_data *dev);
+static DEFINE_MUTEX(disconnect_mutex);
+
/* reads a control register */
static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data)
{
@@ -1071,9 +1073,13 @@ static int ufx_ops_open(struct fb_info *info, int user)
if (user == 0 && !console)
return -EBUSY;
+ mutex_lock(&disconnect_mutex);
+
/* If the USB device is gone, we don't accept new opens */
- if (dev->virtualized)
+ if (dev->virtualized) {
+ mutex_unlock(&disconnect_mutex);
return -ENODEV;
+ }
dev->fb_count++;
@@ -1097,6 +1103,8 @@ static int ufx_ops_open(struct fb_info *info, int user)
pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d",
info->node, user, info, dev->fb_count);
+ mutex_unlock(&disconnect_mutex);
+
return 0;
}
@@ -1741,6 +1749,8 @@ static void ufx_usb_disconnect(struct usb_interface *interface)
{
struct ufx_data *dev;
+ mutex_lock(&disconnect_mutex);
+
dev = usb_get_intfdata(interface);
pr_debug("USB disconnect starting\n");
@@ -1761,6 +1771,8 @@ static void ufx_usb_disconnect(struct usb_interface *interface)
kref_put(&dev->kref, ufx_free);
/* consider ufx_data freed */
+
+ mutex_unlock(&disconnect_mutex);
}
static struct usb_driver ufx_driver = {
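
The smscufx fix serializes open() against USB disconnect with a single driver-wide mutex, so ->virtualized cannot flip between the check and the open bookkeeping. The shape of the guard, with hypothetical demo names:

```c
#include <linux/mutex.h>
#include <linux/errno.h>

struct demo_dev {
	bool gone;		/* set by disconnect under the lock */
	int open_count;
};

static DEFINE_MUTEX(demo_disconnect_mutex);

static int demo_open(struct demo_dev *dev)
{
	int ret = 0;

	mutex_lock(&demo_disconnect_mutex);
	if (dev->gone)
		ret = -ENODEV;	/* no new opens once unplugged */
	else
		dev->open_count++;
	mutex_unlock(&demo_disconnect_mutex);

	return ret;
}
```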
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index f9c3b1d38fc2..219ce7292337 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1128,11 +1128,6 @@ static inline void shadowmode_on(struct tridentfb_par *par)
write3CE(par, CyberControl, read3CE(par, CyberControl) | 0x81);
}
-static inline void shadowmode_off(struct tridentfb_par *par)
-{
- write3CE(par, CyberControl, read3CE(par, CyberControl) & 0x7E);
-}
-
/* Set the hardware to the requested video mode */
static int tridentfb_set_par(struct fb_info *info)
{
@@ -1475,7 +1470,7 @@ static int trident_pci_probe(struct pci_dev *dev,
if (err)
return err;
- err = pci_enable_device(dev);
+ err = pcim_enable_device(dev);
if (err)
return err;
@@ -1715,12 +1710,10 @@ out_unmap2:
kfree(info->pixmap.addr);
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
disable_mmio(info->par);
out_unmap1:
if (default_par->io_virt)
iounmap(default_par->io_virt);
- release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
framebuffer_release(info);
return err;
}
@@ -1735,8 +1728,6 @@ static void trident_pci_remove(struct pci_dev *dev)
i2c_del_adapter(&par->ddc_adapter);
iounmap(par->io_virt);
iounmap(info->screen_base);
- release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
- release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
kfree(info->pixmap.addr);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index c863244ef12c..216d49c9d47e 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -370,7 +370,7 @@ static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
const unsigned long *back = (const unsigned long *) bback;
const unsigned long *front = (const unsigned long *) *bfront;
const int width = *width_bytes / sizeof(unsigned long);
- int identical = width;
+ int identical;
int start = width;
int end = width;
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 4df6772802d7..fd5d701106e1 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1580,7 +1580,7 @@ static ssize_t uvesafb_show_vendor(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_vendor_name_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_vendor_name_ptr);
else
return 0;
@@ -1595,7 +1595,7 @@ static ssize_t uvesafb_show_product_name(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_name_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_name_ptr);
else
return 0;
@@ -1610,7 +1610,7 @@ static ssize_t uvesafb_show_product_rev(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_rev_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_rev_ptr);
else
return 0;
@@ -1625,7 +1625,7 @@ static ssize_t uvesafb_show_oem_string(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_string_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
(char *)(&par->vbe_ib) + par->vbe_ib.oem_string_ptr);
else
return 0;
@@ -1639,7 +1639,7 @@ static ssize_t uvesafb_show_nocrtc(struct device *dev,
struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
- return snprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
}
static ssize_t uvesafb_store_nocrtc(struct device *dev,
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 35cf51ae3292..af47f8217095 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1421,6 +1421,7 @@ static const struct platform_device_id vga16fb_driver_id_table[] = {
{"vga-framebuffer", 0},
{ }
};
+MODULE_DEVICE_TABLE(platform, vga16fb_driver_id_table);
static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ad258a9d3b9f..a6c86f916dbd 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -409,6 +409,9 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
if (!err)
return 0;
+ /* Is there an interrupt? If not, give up. */
+ if (!(to_vp_device(vdev)->pci_dev->irq))
+ return err;
/* Finally fall back to regular interrupts. */
return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 4620e9d79dde..2e7689bb933b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
+#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>
@@ -352,8 +353,15 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
struct scatterlist *sg,
enum dma_data_direction direction)
{
- if (!vq->use_dma_api)
+ if (!vq->use_dma_api) {
+ /*
+ * If DMA is not used, KMSAN doesn't know that the scatterlist
+ * is initialized by the hardware. Explicitly check/unpoison it
+ * depending on the direction.
+ */
+ kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
return (dma_addr_t)sg_phys(sg);
+ }
/*
* We can't use dma_map_sg, because we don't use scatterlists in
@@ -1066,7 +1074,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
if (!queue) {
/* Try to get a single page. You are my only hope! */
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
- &dma_addr, GFP_KERNEL|__GFP_ZERO);
+ &dma_addr, GFP_KERNEL | __GFP_ZERO);
}
if (!queue)
return -ENOMEM;
@@ -1867,7 +1875,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
&ring_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!ring)
goto err;
@@ -1879,7 +1887,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
driver = vring_alloc_queue(vdev, event_size_in_bytes,
&driver_event_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!driver)
goto err;
@@ -1889,7 +1897,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!device)
goto err;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 688922fc4edb..b64bc49c7f30 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1089,6 +1089,17 @@ config EBC_C384_WDT
WinSystems EBC-C384 motherboard. The timeout may be configured via
the timeout module parameter.
+config EXAR_WDT
+ tristate "Exar Watchdog Timer"
+ depends on X86
+ select WATCHDOG_CORE
+ help
+ Enables support for the watchdog timer present in some
+ Exar/MaxLinear UART chips like the XR28V38x.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exar_wdt.
+
config F71808E_WDT
tristate "Fintek F718xx, F818xx Super I/O Watchdog"
depends on X86
@@ -1315,7 +1326,7 @@ config IT87_WDT
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
select WATCHDOG_CORE
- depends on X86 && PCI
+ depends on (ARM64 || X86) && PCI
help
A software monitoring watchdog and NMI handling driver. This driver
will detect lockups and provide a stack trace. This is a driver that
@@ -1325,7 +1336,7 @@ config HP_WATCHDOG
config HPWDT_NMI_DECODING
bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
- depends on HP_WATCHDOG
+ depends on X86 && HP_WATCHDOG
default y
help
Enables the NMI handler for the watchdog pretimeout NMI and the iLO
@@ -1935,10 +1946,10 @@ config BOOKE_WDT
config BOOKE_WDT_DEFAULT_TIMEOUT
int "PowerPC Book-E Watchdog Timer Default Timeout"
depends on BOOKE_WDT
- default 38 if PPC_FSL_BOOK3E
- range 0 63 if PPC_FSL_BOOK3E
- default 3 if !PPC_FSL_BOOK3E
- range 0 3 if !PPC_FSL_BOOK3E
+ default 38 if PPC_E500
+ range 0 63 if PPC_E500
+ default 3 if !PPC_E500
+ range 0 3 if !PPC_E500
help
Select the default watchdog timer period to be used by the PowerPC
Book-E watchdog driver. A watchdog "event" occurs when the bit
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index cdeb119e6e61..d41e5f830ae7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
obj-$(CONFIG_EBC_C384_WDT) += ebc-c384_wdt.o
+obj-$(CONFIG_EXAR_WDT) += exar_wdt.o
obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o
obj-$(CONFIG_GEODE_WDT) += geodewdt.o
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 854b1cc723cb..ac9fed1ef681 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -179,6 +179,8 @@ static int armada_37xx_wdt_set_timeout(struct watchdog_device *wdt,
dev->timeout = (u64)dev->clk_rate * timeout;
do_div(dev->timeout, CNTR_CTRL_PRESCALE_MIN);
+ set_counter_value(dev, CNTR_ID_WDOG, dev->timeout);
+
return 0;
}
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index bd06622813eb..0cff2adfbfc9 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -332,18 +332,18 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
reg &= config->ext_pulse_width_mask;
- if (of_property_read_bool(np, "aspeed,ext-push-pull"))
- reg |= WDT_PUSH_PULL_MAGIC;
+ if (of_property_read_bool(np, "aspeed,ext-active-high"))
+ reg |= WDT_ACTIVE_HIGH_MAGIC;
else
- reg |= WDT_OPEN_DRAIN_MAGIC;
+ reg |= WDT_ACTIVE_LOW_MAGIC;
writel(reg, wdt->base + WDT_RESET_WIDTH);
reg &= config->ext_pulse_width_mask;
- if (of_property_read_bool(np, "aspeed,ext-active-high"))
- reg |= WDT_ACTIVE_HIGH_MAGIC;
+ if (of_property_read_bool(np, "aspeed,ext-push-pull"))
+ reg |= WDT_PUSH_PULL_MAGIC;
else
- reg |= WDT_ACTIVE_LOW_MAGIC;
+ reg |= WDT_OPEN_DRAIN_MAGIC;
writel(reg, wdt->base + WDT_RESET_WIDTH);
}
diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c
index 0b6999f3b6e8..4a20e07fbb69 100644
--- a/drivers/watchdog/bd9576_wdt.c
+++ b/drivers/watchdog/bd9576_wdt.c
@@ -9,8 +9,8 @@
#include <linux/gpio/consumer.h>
#include <linux/mfd/rohm-bd957x.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -202,10 +202,10 @@ static int bd957x_set_wdt_mode(struct bd9576_wdt_priv *priv, int hw_margin,
static int bd9576_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->parent->of_node;
struct bd9576_wdt_priv *priv;
u32 hw_margin[2];
u32 hw_margin_max = BD957X_WDT_DEFAULT_MARGIN, hw_margin_min = 0;
+ int count;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -221,40 +221,51 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- priv->gpiod_en = devm_gpiod_get_from_of_node(dev, dev->parent->of_node,
- "rohm,watchdog-enable-gpios",
- 0, GPIOD_OUT_LOW,
- "watchdog-enable");
+ priv->gpiod_en = devm_fwnode_gpiod_get(dev, dev_fwnode(dev->parent),
+ "rohm,watchdog-enable",
+ GPIOD_OUT_LOW,
+ "watchdog-enable");
if (IS_ERR(priv->gpiod_en))
return dev_err_probe(dev, PTR_ERR(priv->gpiod_en),
"getting watchdog-enable GPIO failed\n");
- priv->gpiod_ping = devm_gpiod_get_from_of_node(dev, dev->parent->of_node,
- "rohm,watchdog-ping-gpios",
- 0, GPIOD_OUT_LOW,
- "watchdog-ping");
+ priv->gpiod_ping = devm_fwnode_gpiod_get(dev, dev_fwnode(dev->parent),
+ "rohm,watchdog-ping",
+ GPIOD_OUT_LOW,
+ "watchdog-ping");
if (IS_ERR(priv->gpiod_ping))
return dev_err_probe(dev, PTR_ERR(priv->gpiod_ping),
"getting watchdog-ping GPIO failed\n");
- ret = of_property_read_variable_u32_array(np, "rohm,hw-timeout-ms",
- &hw_margin[0], 1, 2);
- if (ret < 0 && ret != -EINVAL)
- return ret;
+ count = device_property_count_u32(dev->parent, "rohm,hw-timeout-ms");
+ if (count < 0 && count != -EINVAL)
+ return count;
+
+ if (count > 0) {
+ if (count > ARRAY_SIZE(hw_margin))
+ return -EINVAL;
- if (ret == 1)
- hw_margin_max = hw_margin[0];
+ ret = device_property_read_u32_array(dev->parent,
+ "rohm,hw-timeout-ms",
+ hw_margin, count);
+ if (ret < 0)
+ return ret;
- if (ret == 2) {
- hw_margin_max = hw_margin[1];
- hw_margin_min = hw_margin[0];
+ if (count == 1)
+ hw_margin_max = hw_margin[0];
+
+ if (count == 2) {
+ hw_margin_max = hw_margin[1];
+ hw_margin_min = hw_margin[0];
+ }
}
ret = bd957x_set_wdt_mode(priv, hw_margin_max, hw_margin_min);
if (ret)
return ret;
- priv->always_running = of_property_read_bool(np, "always-running");
+ priv->always_running = device_property_read_bool(dev->parent,
+ "always-running");
watchdog_set_drvdata(&priv->wdd, priv);
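The conversion above swaps of_property_read_variable_u32_array() for the firmware-agnostic property API, splitting the read into a count step (so an absent property, reported as -EINVAL, can fall back to defaults) and a bounds-checked array read. A sketch of that count-then-read idiom, with an illustrative property name:

/* Sketch of the count-then-read device-property idiom; the property
 * name and the [min, max] layout are illustrative, not the BD9576 ABI. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/property.h>

static int read_timeout_range(struct device *dev, u32 *min_ms, u32 *max_ms)
{
	u32 vals[2];
	int ret, count;

	count = device_property_count_u32(dev, "vendor,hw-timeout-ms");
	if (count == -EINVAL)
		return 0;		/* property absent: keep defaults */
	if (count < 0)
		return count;		/* malformed property */
	if (count == 0 || count > ARRAY_SIZE(vals))
		return -EINVAL;

	ret = device_property_read_u32_array(dev, "vendor,hw-timeout-ms",
					     vals, count);
	if (ret)
		return ret;

	*max_ms = vals[count - 1];	/* one value: max; two: [min, max] */
	if (count == 2)
		*min_ms = vals[0];
	return 0;
}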
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 75da5cd02615..932a03f4436a 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -27,7 +27,7 @@
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
#define WDTP_MASK (WDTP(0x3f))
#else
@@ -45,7 +45,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
/* For the specified period, determine the number of seconds
* corresponding to the reset time. There will be a watchdog
@@ -88,7 +88,7 @@ static unsigned int sec_to_period(unsigned int secs)
#define MAX_WDT_TIMEOUT period_to_sec(1)
-#else /* CONFIG_PPC_FSL_BOOK3E */
+#else /* CONFIG_PPC_E500 */
static unsigned long long period_to_sec(unsigned int period)
{
@@ -102,7 +102,7 @@ static unsigned int sec_to_period(unsigned int secs)
#define MAX_WDT_TIMEOUT 3 /* from Kconfig */
-#endif /* !CONFIG_PPC_FSL_BOOK3E */
+#endif /* !CONFIG_PPC_E500 */
static void __booke_wdt_set(void *data)
{
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index ce682942662c..e26609ad4c17 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -192,7 +192,7 @@ static void eurwdt_ping(void)
* @ppos: pointer to the position to write. No seeks allowed
*
* A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we we don't define content meaning.
+ * write of data will do, as we don't define content meaning.
*/
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
diff --git a/drivers/watchdog/exar_wdt.c b/drivers/watchdog/exar_wdt.c
new file mode 100644
index 000000000000..35058d8b21bc
--- /dev/null
+++ b/drivers/watchdog/exar_wdt.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * exar_wdt.c - Driver for the watchdog present in some
+ * Exar/MaxLinear UART chips like the XR28V38x.
+ *
+ * (c) Copyright 2022 D. Müller <d.mueller@elsoft.ch>.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+#define DRV_NAME "exar_wdt"
+
+static const unsigned short sio_config_ports[] = { 0x2e, 0x4e };
+static const unsigned char sio_enter_keys[] = { 0x67, 0x77, 0x87, 0xA0 };
+#define EXAR_EXIT_KEY 0xAA
+
+#define EXAR_LDN 0x07
+#define EXAR_DID 0x20
+#define EXAR_VID 0x23
+#define EXAR_WDT 0x26
+#define EXAR_ACT 0x30
+#define EXAR_RTBASE 0x60
+
+#define EXAR_WDT_LDEV 0x08
+
+#define EXAR_VEN_ID 0x13A8
+#define EXAR_DEV_382 0x0382
+#define EXAR_DEV_384 0x0384
+
+/* WDT runtime registers */
+#define WDT_CTRL 0x00
+#define WDT_VAL 0x01
+
+#define WDT_UNITS_10MS 0x0 /* the 10 millisecond unit of the HW is not used */
+#define WDT_UNITS_SEC 0x2
+#define WDT_UNITS_MIN 0x4
+
+/* default WDT control for WDTOUT signal active / rearm by read */
+#define EXAR_WDT_DEF_CONF 0
+
+struct wdt_pdev_node {
+ struct list_head list;
+ struct platform_device *pdev;
+ const char name[16];
+};
+
+struct wdt_priv {
+ /* the lock for WDT io operations */
+ spinlock_t io_lock;
+ struct resource wdt_res;
+ struct watchdog_device wdt_dev;
+ unsigned short did;
+ unsigned short config_port;
+ unsigned char enter_key;
+ unsigned char unit;
+ unsigned char timeout;
+};
+
+#define WATCHDOG_TIMEOUT 60
+
+static int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. 1<=timeout<=15300, default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int exar_sio_enter(const unsigned short config_port,
+ const unsigned char key)
+{
+ if (!request_muxed_region(config_port, 2, DRV_NAME))
+ return -EBUSY;
+
+ /* write the ENTER-KEY twice */
+ outb(key, config_port);
+ outb(key, config_port);
+
+ return 0;
+}
+
+static void exar_sio_exit(const unsigned short config_port)
+{
+ outb(EXAR_EXIT_KEY, config_port);
+ release_region(config_port, 2);
+}
+
+static unsigned char exar_sio_read(const unsigned short config_port,
+ const unsigned char reg)
+{
+ outb(reg, config_port);
+ return inb(config_port + 1);
+}
+
+static void exar_sio_write(const unsigned short config_port,
+ const unsigned char reg, const unsigned char val)
+{
+ outb(reg, config_port);
+ outb(val, config_port + 1);
+}
+
+static unsigned short exar_sio_read16(const unsigned short config_port,
+ const unsigned char reg)
+{
+ unsigned char msb, lsb;
+
+ msb = exar_sio_read(config_port, reg);
+ lsb = exar_sio_read(config_port, reg + 1);
+
+ return (msb << 8) | lsb;
+}
+
+static void exar_sio_select_wdt(const unsigned short config_port)
+{
+ exar_sio_write(config_port, EXAR_LDN, EXAR_WDT_LDEV);
+}
+
+static void exar_wdt_arm(const struct wdt_priv *priv)
+{
+ unsigned short rt_base = priv->wdt_res.start;
+
+ /* write timeout value twice to arm watchdog */
+ outb(priv->timeout, rt_base + WDT_VAL);
+ outb(priv->timeout, rt_base + WDT_VAL);
+}
+
+static void exar_wdt_disarm(const struct wdt_priv *priv)
+{
+ unsigned short rt_base = priv->wdt_res.start;
+
+ /*
+ * use two accesses with different values to make sure
+ * that a combination of a previous single access and
+ * the ones below with the same value are not falsely
+ * interpreted as "arm watchdog"
+ */
+ outb(0xFF, rt_base + WDT_VAL);
+ outb(0, rt_base + WDT_VAL);
+}
+
+static int exar_wdt_start(struct watchdog_device *wdog)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ unsigned short rt_base = priv->wdt_res.start;
+
+ spin_lock(&priv->io_lock);
+
+ exar_wdt_disarm(priv);
+ outb(priv->unit, rt_base + WDT_CTRL);
+ exar_wdt_arm(priv);
+
+ spin_unlock(&priv->io_lock);
+ return 0;
+}
+
+static int exar_wdt_stop(struct watchdog_device *wdog)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+
+ spin_lock(&priv->io_lock);
+
+ exar_wdt_disarm(priv);
+
+ spin_unlock(&priv->io_lock);
+ return 0;
+}
+
+static int exar_wdt_keepalive(struct watchdog_device *wdog)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ unsigned short rt_base = priv->wdt_res.start;
+
+ spin_lock(&priv->io_lock);
+
+ /* reading the WDT_VAL reg will feed the watchdog */
+ inb(rt_base + WDT_VAL);
+
+ spin_unlock(&priv->io_lock);
+ return 0;
+}
+
+static int exar_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ bool unit_min = false;
+
+ /*
+ * if the new timeout is bigger than 255 seconds, change the
+ * unit to minutes and round the timeout up to the next whole minute
+ */
+ if (t > 255) {
+ unit_min = true;
+ t = DIV_ROUND_UP(t, 60);
+ }
+
+ /* save for later use in exar_wdt_start() */
+ priv->unit = unit_min ? WDT_UNITS_MIN : WDT_UNITS_SEC;
+ priv->timeout = t;
+
+ wdog->timeout = unit_min ? t * 60 : t;
+
+ if (watchdog_hw_running(wdog))
+ exar_wdt_start(wdog);
+
+ return 0;
+}
+
+static const struct watchdog_info exar_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE,
+ .identity = "Exar/MaxLinear XR28V38x Watchdog",
+};
+
+static const struct watchdog_ops exar_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = exar_wdt_start,
+ .stop = exar_wdt_stop,
+ .ping = exar_wdt_keepalive,
+ .set_timeout = exar_wdt_set_timeout,
+};
+
+static int exar_wdt_config(struct watchdog_device *wdog,
+ const unsigned char conf)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ int ret;
+
+ ret = exar_sio_enter(priv->config_port, priv->enter_key);
+ if (ret)
+ return ret;
+
+ exar_sio_select_wdt(priv->config_port);
+ exar_sio_write(priv->config_port, EXAR_WDT, conf);
+
+ exar_sio_exit(priv->config_port);
+
+ return 0;
+}
+
+static int __init exar_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wdt_priv *priv = dev->platform_data;
+ struct watchdog_device *wdt_dev = &priv->wdt_dev;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENXIO;
+
+ spin_lock_init(&priv->io_lock);
+
+ wdt_dev->info = &exar_wdt_info;
+ wdt_dev->ops = &exar_wdt_ops;
+ wdt_dev->min_timeout = 1;
+ wdt_dev->max_timeout = 255 * 60;
+
+ watchdog_init_timeout(wdt_dev, timeout, NULL);
+ watchdog_set_nowayout(wdt_dev, nowayout);
+ watchdog_stop_on_reboot(wdt_dev);
+ watchdog_stop_on_unregister(wdt_dev);
+ watchdog_set_drvdata(wdt_dev, priv);
+
+ ret = exar_wdt_config(wdt_dev, EXAR_WDT_DEF_CONF);
+ if (ret)
+ return ret;
+
+ exar_wdt_set_timeout(wdt_dev, timeout);
+ /* Make sure that the watchdog is not running */
+ exar_wdt_stop(wdt_dev);
+
+ ret = devm_watchdog_register_device(dev, wdt_dev);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "XR28V%X WDT initialized. timeout=%d sec (nowayout=%d)\n",
+ priv->did, timeout, nowayout);
+
+ return 0;
+}
+
+static unsigned short __init exar_detect(const unsigned short config_port,
+ const unsigned char key,
+ unsigned short *rt_base)
+{
+ int ret;
+ unsigned short base = 0;
+ unsigned short vid, did;
+
+ ret = exar_sio_enter(config_port, key);
+ if (ret)
+ return 0;
+
+ vid = exar_sio_read16(config_port, EXAR_VID);
+ did = exar_sio_read16(config_port, EXAR_DID);
+
+ /* check for the vendor and device IDs we currently know about */
+ if (vid == EXAR_VEN_ID &&
+ (did == EXAR_DEV_382 ||
+ did == EXAR_DEV_384)) {
+ exar_sio_select_wdt(config_port);
+ /* is device active? */
+ if (exar_sio_read(config_port, EXAR_ACT) == 0x01)
+ base = exar_sio_read16(config_port, EXAR_RTBASE);
+ }
+
+ exar_sio_exit(config_port);
+
+ if (base) {
+ pr_debug("Found a XR28V%X WDT (conf: 0x%x / rt: 0x%04x)\n",
+ did, config_port, base);
+ *rt_base = base;
+ return did;
+ }
+
+ return 0;
+}
+
+static struct platform_driver exar_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static LIST_HEAD(pdev_list);
+
+static int __init exar_wdt_register(struct wdt_priv *priv, const int idx)
+{
+ struct wdt_pdev_node *n;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&n->list);
+
+ scnprintf((char *)n->name, sizeof(n->name), DRV_NAME ".%d", idx);
+ priv->wdt_res.name = n->name;
+
+ n->pdev = platform_device_register_resndata(NULL, DRV_NAME, idx,
+ &priv->wdt_res, 1,
+ priv, sizeof(*priv));
+ if (IS_ERR(n->pdev)) {
+ kfree(n);
+ return PTR_ERR(n->pdev);
+ }
+
+ list_add_tail(&n->list, &pdev_list);
+
+ return 0;
+}
+
+static void exar_wdt_unregister(void)
+{
+ struct wdt_pdev_node *n, *t;
+
+ list_for_each_entry_safe(n, t, &pdev_list, list) {
+ platform_device_unregister(n->pdev);
+ list_del(&n->list);
+ kfree(n);
+ }
+}
+
+static int __init exar_wdt_init(void)
+{
+ int ret, i, j, idx = 0;
+
+ /* search for active Exar watchdogs on all possible locations */
+ for (i = 0; i < ARRAY_SIZE(sio_config_ports); i++) {
+ for (j = 0; j < ARRAY_SIZE(sio_enter_keys); j++) {
+ unsigned short did, rt_base = 0;
+
+ did = exar_detect(sio_config_ports[i],
+ sio_enter_keys[j],
+ &rt_base);
+
+ if (did) {
+ struct wdt_priv priv = {
+ .wdt_res = DEFINE_RES_IO(rt_base, 2),
+ .did = did,
+ .config_port = sio_config_ports[i],
+ .enter_key = sio_enter_keys[j],
+ };
+
+ ret = exar_wdt_register(&priv, idx);
+ if (!ret)
+ idx++;
+ }
+ }
+ }
+
+ if (!idx)
+ return -ENODEV;
+
+ ret = platform_driver_probe(&exar_wdt_driver, exar_wdt_probe);
+ if (ret)
+ exar_wdt_unregister();
+
+ return ret;
+}
+
+static void __exit exar_wdt_exit(void)
+{
+ exar_wdt_unregister();
+ platform_driver_unregister(&exar_wdt_driver);
+}
+
+module_init(exar_wdt_init);
+module_exit(exar_wdt_exit);
+
+MODULE_AUTHOR("David Müller <d.mueller@elsoft.ch>");
+MODULE_DESCRIPTION("Exar/MaxLinear Watchdog Driver");
+MODULE_LICENSE("GPL");
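Once a device is detected and the platform device probes, the new driver is driven entirely through the generic watchdog chardev ABI. A minimal user-space sketch (the /dev/watchdog0 node name is an assumption, since it depends on registration order) that also exercises the unit switch in exar_wdt_set_timeout(): 400 s exceeds the 255 s byte range, so the driver rounds up to 7 minutes and the effective timeout reads back as 420 s:

/* User-space sketch against the generic watchdog ABI; the device node
 * name is an assumption. The 'V' write is the magic close the driver
 * permits via WDIOF_MAGICCLOSE. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 400;	/* > 255 s: the driver switches to minutes */
	int fd = open("/dev/watchdog0", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* kernel writes back 420 */
	printf("effective timeout: %d s\n", timeout);
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* feed the watchdog once */
	if (write(fd, "V", 1) < 0)		/* magic close: allow stop */
		perror("magic close");
	close(fd);
	return 0;
}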
diff --git a/drivers/watchdog/ftwdt010_wdt.c b/drivers/watchdog/ftwdt010_wdt.c
index 21dcc7765688..442c5bf63ff4 100644
--- a/drivers/watchdog/ftwdt010_wdt.c
+++ b/drivers/watchdog/ftwdt010_wdt.c
@@ -47,21 +47,28 @@ struct ftwdt010_wdt *to_ftwdt010_wdt(struct watchdog_device *wdd)
return container_of(wdd, struct ftwdt010_wdt, wdd);
}
-static int ftwdt010_wdt_start(struct watchdog_device *wdd)
+static void ftwdt010_enable(struct ftwdt010_wdt *gwdt,
+ unsigned int timeout,
+ bool need_irq)
{
- struct ftwdt010_wdt *gwdt = to_ftwdt010_wdt(wdd);
u32 enable;
- writel(wdd->timeout * WDT_CLOCK, gwdt->base + FTWDT010_WDLOAD);
+ writel(timeout * WDT_CLOCK, gwdt->base + FTWDT010_WDLOAD);
writel(WDRESTART_MAGIC, gwdt->base + FTWDT010_WDRESTART);
/* set clock before enabling */
enable = WDCR_CLOCK_5MHZ | WDCR_SYS_RST;
writel(enable, gwdt->base + FTWDT010_WDCR);
- if (gwdt->has_irq)
+ if (need_irq)
enable |= WDCR_WDINTR;
enable |= WDCR_ENABLE;
writel(enable, gwdt->base + FTWDT010_WDCR);
+}
+static int ftwdt010_wdt_start(struct watchdog_device *wdd)
+{
+ struct ftwdt010_wdt *gwdt = to_ftwdt010_wdt(wdd);
+
+ ftwdt010_enable(gwdt, wdd->timeout, gwdt->has_irq);
return 0;
}
@@ -93,6 +100,13 @@ static int ftwdt010_wdt_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static int ftwdt010_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ ftwdt010_enable(to_ftwdt010_wdt(wdd), 0, false);
+ return 0;
+}
+
static irqreturn_t ftwdt010_wdt_interrupt(int irq, void *data)
{
struct ftwdt010_wdt *gwdt = data;
@@ -107,6 +121,7 @@ static const struct watchdog_ops ftwdt010_wdt_ops = {
.stop = ftwdt010_wdt_stop,
.ping = ftwdt010_wdt_ping,
.set_timeout = ftwdt010_wdt_set_timeout,
+ .restart = ftwdt010_wdt_restart,
.owner = THIS_MODULE,
};
@@ -156,7 +171,7 @@ static int ftwdt010_wdt_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq) {
+ if (irq > 0) {
ret = devm_request_irq(dev, irq, ftwdt010_wdt_interrupt, 0,
"watchdog bark", gwdt);
if (ret)
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index a5006a58e0db..f79f932bca14 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -20,7 +20,9 @@
#include <linux/pci_ids.h>
#include <linux/types.h>
#include <linux/watchdog.h>
+#ifdef CONFIG_HPWDT_NMI_DECODING
#include <asm/nmi.h>
+#endif
#include <linux/crash_dump.h>
#define HPWDT_VERSION "2.0.4"
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 922b60374295..2897902090b3 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -9,12 +9,15 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
#define WDOG_CS 0x0
+#define WDOG_CS_FLG BIT(14)
#define WDOG_CS_CMD32EN BIT(13)
+#define WDOG_CS_PRES BIT(12)
#define WDOG_CS_ULK BIT(11)
#define WDOG_CS_RCS BIT(10)
#define LPO_CLK 0x1
@@ -39,60 +42,105 @@
#define DEFAULT_TIMEOUT 60
#define MAX_TIMEOUT 128
#define WDOG_CLOCK_RATE 1000
-#define WDOG_WAIT_TIMEOUT 20
+#define WDOG_ULK_WAIT_TIMEOUT 1000
+#define WDOG_RCS_WAIT_TIMEOUT 10000
+#define WDOG_RCS_POST_WAIT 3000
+
+#define RETRY_MAX 5
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0000);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+struct imx_wdt_hw_feature {
+ bool prescaler_enable;
+ u32 wdog_clock_rate;
+};
+
struct imx7ulp_wdt_device {
struct watchdog_device wdd;
void __iomem *base;
struct clk *clk;
+ bool post_rcs_wait;
+ const struct imx_wdt_hw_feature *hw;
};
-static int imx7ulp_wdt_wait(void __iomem *base, u32 mask)
+static int imx7ulp_wdt_wait_ulk(void __iomem *base)
{
u32 val = readl(base + WDOG_CS);
- if (!(val & mask) && readl_poll_timeout_atomic(base + WDOG_CS, val,
- val & mask, 0,
- WDOG_WAIT_TIMEOUT))
+ if (!(val & WDOG_CS_ULK) &&
+ readl_poll_timeout_atomic(base + WDOG_CS, val,
+ val & WDOG_CS_ULK, 0,
+ WDOG_ULK_WAIT_TIMEOUT))
return -ETIMEDOUT;
return 0;
}
-static int imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
+static int imx7ulp_wdt_wait_rcs(struct imx7ulp_wdt_device *wdt)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ int ret = 0;
+ u32 val = readl(wdt->base + WDOG_CS);
+ u64 timeout = (val & WDOG_CS_PRES) ?
+ WDOG_RCS_WAIT_TIMEOUT * 256 : WDOG_RCS_WAIT_TIMEOUT;
+ unsigned long wait_min = (val & WDOG_CS_PRES) ?
+ WDOG_RCS_POST_WAIT * 256 : WDOG_RCS_POST_WAIT;
+
+ if (!(val & WDOG_CS_RCS) &&
+ readl_poll_timeout(wdt->base + WDOG_CS, val, val & WDOG_CS_RCS, 100,
+ timeout))
+ ret = -ETIMEDOUT;
+
+ /* Wait 2.5 clocks after RCS done */
+ if (wdt->post_rcs_wait)
+ usleep_range(wait_min, wait_min + 2000);
+
+ return ret;
+}
+static int _imx7ulp_wdt_enable(struct imx7ulp_wdt_device *wdt, bool enable)
+{
u32 val = readl(wdt->base + WDOG_CS);
int ret;
local_irq_disable();
writel(UNLOCK, wdt->base + WDOG_CNT);
- ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK);
+ ret = imx7ulp_wdt_wait_ulk(wdt->base);
if (ret)
goto enable_out;
if (enable)
writel(val | WDOG_CS_EN, wdt->base + WDOG_CS);
else
writel(val & ~WDOG_CS_EN, wdt->base + WDOG_CS);
- imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS);
-enable_out:
local_irq_enable();
+ ret = imx7ulp_wdt_wait_rcs(wdt);
return ret;
+
+enable_out:
+ local_irq_enable();
+ return ret;
}
-static bool imx7ulp_wdt_is_enabled(void __iomem *base)
+static int imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
{
- u32 val = readl(base + WDOG_CS);
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ int ret;
+ u32 val;
+ u32 loop = RETRY_MAX;
+
+ do {
+ ret = _imx7ulp_wdt_enable(wdt, enable);
+ val = readl(wdt->base + WDOG_CS);
+ } while (--loop > 0 && ((!!(val & WDOG_CS_EN)) != enable || ret));
- return val & WDOG_CS_EN;
+ if (loop == 0)
+ return -EBUSY;
+
+ return ret;
}
static int imx7ulp_wdt_ping(struct watchdog_device *wdog)
@@ -114,26 +162,44 @@ static int imx7ulp_wdt_stop(struct watchdog_device *wdog)
return imx7ulp_wdt_enable(wdog, false);
}
-static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
- unsigned int timeout)
+static int _imx7ulp_wdt_set_timeout(struct imx7ulp_wdt_device *wdt,
+ unsigned int toval)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- u32 val = WDOG_CLOCK_RATE * timeout;
int ret;
local_irq_disable();
writel(UNLOCK, wdt->base + WDOG_CNT);
- ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK);
+ ret = imx7ulp_wdt_wait_ulk(wdt->base);
if (ret)
goto timeout_out;
- writel(val, wdt->base + WDOG_TOVAL);
- imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS);
-
- wdog->timeout = timeout;
+ writel(toval, wdt->base + WDOG_TOVAL);
+ local_irq_enable();
+ ret = imx7ulp_wdt_wait_rcs(wdt);
+ return ret;
timeout_out:
local_irq_enable();
+ return ret;
+}
+static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
+{
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ u32 toval = wdt->hw->wdog_clock_rate * timeout;
+ u32 val;
+ int ret;
+ u32 loop = RETRY_MAX;
+
+ do {
+ ret = _imx7ulp_wdt_set_timeout(wdt, toval);
+ val = readl(wdt->base + WDOG_TOVAL);
+ } while (--loop > 0 && (val != toval || ret));
+
+ if (loop == 0)
+ return -EBUSY;
+
+ wdog->timeout = timeout;
return ret;
}
@@ -173,29 +239,62 @@ static const struct watchdog_info imx7ulp_wdt_info = {
WDIOF_MAGICCLOSE,
};
-static int imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
+static int _imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout, unsigned int cs)
{
u32 val;
int ret;
local_irq_disable();
- /* unlock the wdog for reconfiguration */
- writel_relaxed(UNLOCK_SEQ0, base + WDOG_CNT);
- writel_relaxed(UNLOCK_SEQ1, base + WDOG_CNT);
- ret = imx7ulp_wdt_wait(base, WDOG_CS_ULK);
+
+ val = readl(wdt->base + WDOG_CS);
+ if (val & WDOG_CS_CMD32EN) {
+ writel(UNLOCK, wdt->base + WDOG_CNT);
+ } else {
+ mb();
+ /* unlock the wdog for reconfiguration */
+ writel_relaxed(UNLOCK_SEQ0, wdt->base + WDOG_CNT);
+ writel_relaxed(UNLOCK_SEQ1, wdt->base + WDOG_CNT);
+ mb();
+ }
+
+ ret = imx7ulp_wdt_wait_ulk(wdt->base);
if (ret)
goto init_out;
/* set an initial timeout value in TOVAL */
- writel(timeout, base + WDOG_TOVAL);
- /* enable 32bit command sequence and reconfigure */
- val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE |
- WDOG_CS_WAIT | WDOG_CS_STOP;
- writel(val, base + WDOG_CS);
- imx7ulp_wdt_wait(base, WDOG_CS_RCS);
+ writel(timeout, wdt->base + WDOG_TOVAL);
+ writel(cs, wdt->base + WDOG_CS);
+ local_irq_enable();
+ ret = imx7ulp_wdt_wait_rcs(wdt);
+
+ return ret;
init_out:
local_irq_enable();
+ return ret;
+}
+
+static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout)
+{
+ /* enable 32bit command sequence and reconfigure */
+ u32 val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE |
+ WDOG_CS_WAIT | WDOG_CS_STOP;
+ u32 cs, toval;
+ int ret;
+ u32 loop = RETRY_MAX;
+
+ if (wdt->hw->prescaler_enable)
+ val |= WDOG_CS_PRES;
+
+ do {
+ ret = _imx7ulp_wdt_init(wdt, timeout, val);
+ toval = readl(wdt->base + WDOG_TOVAL);
+ cs = readl(wdt->base + WDOG_CS);
+ cs &= ~(WDOG_CS_FLG | WDOG_CS_ULK | WDOG_CS_RCS);
+ } while (--loop > 0 && (cs != val || toval != timeout || ret));
+
+ if (loop == 0)
+ return -EBUSY;
return ret;
}
@@ -228,6 +327,15 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
return PTR_ERR(imx7ulp_wdt->clk);
}
+ imx7ulp_wdt->post_rcs_wait = true;
+ if (of_device_is_compatible(dev->of_node,
+ "fsl,imx8ulp-wdt")) {
+ dev_info(dev, "imx8ulp wdt probe\n");
+ imx7ulp_wdt->post_rcs_wait = false;
+ } else {
+ dev_info(dev, "imx7ulp wdt probe\n");
+ }
+
ret = clk_prepare_enable(imx7ulp_wdt->clk);
if (ret)
return ret;
@@ -248,14 +356,16 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
watchdog_set_drvdata(wdog, imx7ulp_wdt);
- ret = imx7ulp_wdt_init(imx7ulp_wdt->base, wdog->timeout * WDOG_CLOCK_RATE);
+
+ imx7ulp_wdt->hw = of_device_get_match_data(dev);
+ ret = imx7ulp_wdt_init(imx7ulp_wdt, wdog->timeout * imx7ulp_wdt->hw->wdog_clock_rate);
if (ret)
return ret;
return devm_watchdog_register_device(dev, wdog);
}
-static int __maybe_unused imx7ulp_wdt_suspend(struct device *dev)
+static int __maybe_unused imx7ulp_wdt_suspend_noirq(struct device *dev)
{
struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev);
@@ -267,30 +377,44 @@ static int __maybe_unused imx7ulp_wdt_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused imx7ulp_wdt_resume(struct device *dev)
+static int __maybe_unused imx7ulp_wdt_resume_noirq(struct device *dev)
{
struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev);
- u32 timeout = imx7ulp_wdt->wdd.timeout * WDOG_CLOCK_RATE;
+ u32 timeout = imx7ulp_wdt->wdd.timeout * imx7ulp_wdt->hw->wdog_clock_rate;
int ret;
ret = clk_prepare_enable(imx7ulp_wdt->clk);
if (ret)
return ret;
- if (imx7ulp_wdt_is_enabled(imx7ulp_wdt->base))
- imx7ulp_wdt_init(imx7ulp_wdt->base, timeout);
-
- if (watchdog_active(&imx7ulp_wdt->wdd))
+ if (watchdog_active(&imx7ulp_wdt->wdd)) {
+ imx7ulp_wdt_init(imx7ulp_wdt, timeout);
imx7ulp_wdt_start(&imx7ulp_wdt->wdd);
+ imx7ulp_wdt_ping(&imx7ulp_wdt->wdd);
+ }
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx7ulp_wdt_pm_ops, imx7ulp_wdt_suspend,
- imx7ulp_wdt_resume);
+static const struct dev_pm_ops imx7ulp_wdt_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx7ulp_wdt_suspend_noirq,
+ imx7ulp_wdt_resume_noirq)
+};
+
+static const struct imx_wdt_hw_feature imx7ulp_wdt_hw = {
+ .prescaler_enable = false,
+ .wdog_clock_rate = 1000,
+};
+
+static const struct imx_wdt_hw_feature imx93_wdt_hw = {
+ .prescaler_enable = true,
+ .wdog_clock_rate = 125,
+};
static const struct of_device_id imx7ulp_wdt_dt_ids[] = {
- { .compatible = "fsl,imx7ulp-wdt", },
+ { .compatible = "fsl,imx8ulp-wdt", .data = &imx7ulp_wdt_hw, },
+ { .compatible = "fsl,imx7ulp-wdt", .data = &imx7ulp_wdt_hw, },
+ { .compatible = "fsl,imx93-wdt", .data = &imx93_wdt_hw, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx7ulp_wdt_dt_ids);
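The reworked enable, set_timeout and init paths above all share one idiom: run the unlock-write sequence, read the register back, and retry up to RETRY_MAX times until the readback matches, presumably guarding against writes the WDOG block drops when its reconfiguration window is missed. A generic sketch of that bounded write-then-verify loop, with hypothetical reg_write()/reg_read() accessors:

/* Sketch of the bounded write-then-verify retry idiom used above;
 * reg_write() and reg_read() are hypothetical, not the i.MX interface. */
#include <linux/errno.h>
#include <linux/types.h>

#define RETRY_MAX 5

int reg_write(u32 val);	/* hypothetical: the write may be dropped */
u32 reg_read(void);	/* hypothetical: reads the value back */

static int write_verified(u32 val)
{
	u32 loop = RETRY_MAX;
	int ret;

	do {
		ret = reg_write(val);
	} while (--loop > 0 && (reg_read() != val || ret));

	/* all attempts used up: the hardware never accepted the value */
	return loop ? ret : -EBUSY;
}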
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index d3c9e2f6e63b..981a2f7c3bec 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -156,6 +156,7 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct meson_gxbb_wdt *data;
int ret;
+ u32 ctrl_reg;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -189,13 +190,26 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&data->wdt_dev, nowayout);
watchdog_set_drvdata(&data->wdt_dev, data);
+ ctrl_reg = readl(data->reg_base + GXBB_WDT_CTRL_REG) &
+ GXBB_WDT_CTRL_EN;
+
+ if (ctrl_reg) {
+ /* Watchdog is running - keep it running but extend timeout
+ * to the maximum while setting the timebase
+ */
+ set_bit(WDOG_HW_RUNNING, &data->wdt_dev.status);
+ meson_gxbb_wdt_set_timeout(&data->wdt_dev,
+ GXBB_WDT_TCNT_SETUP_MASK / 1000);
+ }
+
/* Setup with 1ms timebase */
- writel(((clk_get_rate(data->clk) / 1000) & GXBB_WDT_CTRL_DIV_MASK) |
- GXBB_WDT_CTRL_EE_RESET |
- GXBB_WDT_CTRL_CLK_EN |
- GXBB_WDT_CTRL_CLKDIV_EN,
- data->reg_base + GXBB_WDT_CTRL_REG);
+ ctrl_reg |= ((clk_get_rate(data->clk) / 1000) &
+ GXBB_WDT_CTRL_DIV_MASK) |
+ GXBB_WDT_CTRL_EE_RESET |
+ GXBB_WDT_CTRL_CLK_EN |
+ GXBB_WDT_CTRL_CLKDIV_EN;
+ writel(ctrl_reg, data->reg_base + GXBB_WDT_CTRL_REG);
meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
return devm_watchdog_register_device(dev, &data->wdt_dev);
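The probe change above adopts a watchdog the boot firmware left running: the enable bit is kept, the count is stretched to the maximum while the 1 ms timebase is programmed, and WDOG_HW_RUNNING tells the core to ping the hardware until user space opens the device. A condensed sketch of the adoption step (hw_is_enabled is a hypothetical readout; WDOG_HW_RUNNING is the real core flag):

/* Sketch of adopting a firmware-started watchdog during probe. */
#include <linux/watchdog.h>

static void adopt_running_wdt(struct watchdog_device *wdd, bool hw_is_enabled,
			      unsigned int max_timeout)
{
	if (!hw_is_enabled)
		return;

	/* The core pings the device until user space takes over */
	set_bit(WDOG_HW_RUNNING, &wdd->status);
	/* Buy headroom while the timebase is being reprogrammed */
	wdd->ops->set_timeout(wdd, max_timeout);
}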
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
index 28a24caa2627..a5dd1c230137 100644
--- a/drivers/watchdog/npcm_wdt.c
+++ b/drivers/watchdog/npcm_wdt.c
@@ -3,6 +3,7 @@
// Copyright (c) 2018 IBM Corp.
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -43,6 +44,7 @@
struct npcm_wdt {
struct watchdog_device wdd;
void __iomem *reg;
+ struct clk *clk;
};
static inline struct npcm_wdt *to_npcm_wdt(struct watchdog_device *wdd)
@@ -66,6 +68,9 @@ static int npcm_wdt_start(struct watchdog_device *wdd)
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
u32 val;
+ if (wdt->clk)
+ clk_prepare_enable(wdt->clk);
+
if (wdd->timeout < 2)
val = 0x800;
else if (wdd->timeout < 3)
@@ -100,6 +105,9 @@ static int npcm_wdt_stop(struct watchdog_device *wdd)
writel(0, wdt->reg);
+ if (wdt->clk)
+ clk_disable_unprepare(wdt->clk);
+
return 0;
}
@@ -147,6 +155,10 @@ static int npcm_wdt_restart(struct watchdog_device *wdd,
{
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+ /* For reset, we start the WDT clock and leave it running. */
+ if (wdt->clk)
+ clk_prepare_enable(wdt->clk);
+
writel(NPCM_WTR | NPCM_WTRE | NPCM_WTE, wdt->reg);
udelay(1000);
@@ -191,6 +203,10 @@ static int npcm_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->reg))
return PTR_ERR(wdt->reg);
+ wdt->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(wdt->clk))
+ return PTR_ERR(wdt->clk);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
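devm_clk_get_optional() fits here because older NPCM device trees do not describe the watchdog clock: it returns NULL rather than an error when the clock is absent, so the start/stop/restart paths guard each clk call with if (wdt->clk). A minimal sketch of that optional-resource pattern (my_start() is hypothetical):

/* Sketch of the optional-clock pattern. A NULL clk is valid (no clock
 * in DT); the clk API treats NULL as a no-op, so the explicit guard
 * mainly documents intent. */
#include <linux/clk.h>
#include <linux/device.h>

static int my_start(struct device *dev)
{
	struct clk *clk = devm_clk_get_optional(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real error, e.g. -EPROBE_DEFER */

	if (clk)			/* no clock described: nothing to do */
		return clk_prepare_enable(clk);
	return 0;
}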
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 053ef3bde12d..6e9253761fc1 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -225,9 +225,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
wdt->freq = wdt->freq * 9 / 10;
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(&pdev->dev);
return dev_err_probe(dev, ret, "runtime pm failed\n");
}
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index 6eea0ee4af49..974a4194a8fd 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -10,7 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -40,6 +40,11 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+enum rz_wdt_type {
+ WDT_RZG2L,
+ WDT_RZV2M,
+};
+
struct rzg2l_wdt_priv {
void __iomem *base;
struct watchdog_device wdev;
@@ -48,6 +53,7 @@ struct rzg2l_wdt_priv {
unsigned long delay;
struct clk *pclk;
struct clk *osc_clk;
+ enum rz_wdt_type devtype;
};
static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
@@ -142,11 +148,29 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
clk_prepare_enable(priv->pclk);
clk_prepare_enable(priv->osc_clk);
- /* Generate Reset (WDTRSTB) Signal on parity error */
- rzg2l_wdt_write(priv, 0, PECR);
+ if (priv->devtype == WDT_RZG2L) {
+ /* Generate Reset (WDTRSTB) Signal on parity error */
+ rzg2l_wdt_write(priv, 0, PECR);
+
+ /* Force parity error */
+ rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
+ } else {
+ /* RZ/V2M doesn't have parity error registers */
+
+ wdev->timeout = 0;
+
+ /* Initialize timeout */
+ rzg2l_wdt_init_timeout(wdev);
- /* Force parity error */
- rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
+ /* Initialize watchdog counter register */
+ rzg2l_wdt_write(priv, 0, WDTTIM);
+
+ /* Enable watchdog timer */
+ rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+
+ /* Wait 2 consecutive overflow cycles for reset */
+ mdelay(DIV_ROUND_UP(2 * 0xFFFFF * 1000, priv->osc_clk_rate));
+ }
return 0;
}
@@ -227,6 +251,8 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to deassert");
+ priv->devtype = (uintptr_t)of_device_get_match_data(dev);
+
pm_runtime_enable(&pdev->dev);
priv->wdev.info = &rzg2l_wdt_ident;
@@ -255,7 +281,8 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
}
static const struct of_device_id rzg2l_wdt_ids[] = {
- { .compatible = "renesas,rzg2l-wdt", },
+ { .compatible = "renesas,rzg2l-wdt", .data = (void *)WDT_RZG2L },
+ { .compatible = "renesas,rzv2m-wdt", .data = (void *)WDT_RZV2M },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids);
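The RZ/V2M support above is keyed entirely off the compatible string: the devtype enum rides in the of_device_id .data pointer and is recovered in probe with of_device_get_match_data(), letting the restart path branch without any runtime detection. A minimal sketch of carrying an enum through match data (the vendor,chip-* compatibles and my_* names are hypothetical):

/* Sketch of selecting per-SoC behaviour via of_device_id.data. */
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum my_devtype { MY_TYPE_A, MY_TYPE_B };

static const struct of_device_id my_ids[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)MY_TYPE_A },
	{ .compatible = "vendor,chip-b", .data = (void *)MY_TYPE_B },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	/* Round-trip the enum through uintptr_t, as the driver above does */
	enum my_devtype type = (uintptr_t)of_device_get_match_data(&pdev->dev);

	if (type == MY_TYPE_B) {
		/* SoC-specific path, e.g. an alternate restart sequence */
	}
	return 0;
}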
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 95919392927f..d3fc8ed886ff 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -60,9 +60,13 @@
#define EXYNOS850_CLUSTER0_NONCPU_INT_EN 0x1244
#define EXYNOS850_CLUSTER1_NONCPU_OUT 0x1620
#define EXYNOS850_CLUSTER1_NONCPU_INT_EN 0x1644
+#define EXYNOSAUTOV9_CLUSTER1_NONCPU_OUT 0x1520
+#define EXYNOSAUTOV9_CLUSTER1_NONCPU_INT_EN 0x1544
#define EXYNOS850_CLUSTER0_WDTRESET_BIT 24
#define EXYNOS850_CLUSTER1_WDTRESET_BIT 23
+#define EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT 25
+#define EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT 24
/**
* DOC: Quirk flags for different Samsung watchdog IP-cores
@@ -236,6 +240,30 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = {
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
+static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = {
+ .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS850_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl1 = {
+ .mask_reset_reg = EXYNOSAUTOV9_CLUSTER1_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOSAUTOV9_CLUSTER1_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+};
+
static const struct of_device_id s3c2410_wdt_match[] = {
{ .compatible = "samsung,s3c2410-wdt",
.data = &drv_data_s3c2410 },
@@ -249,6 +277,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
.data = &drv_data_exynos7 },
{ .compatible = "samsung,exynos850-wdt",
.data = &drv_data_exynos850_cl0 },
+ { .compatible = "samsung,exynosautov9-wdt",
+ .data = &drv_data_exynosautov9_cl0 },
{},
};
MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
@@ -630,8 +660,9 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev)
}
#ifdef CONFIG_OF
- /* Choose Exynos850 driver data w.r.t. cluster index */
- if (variant == &drv_data_exynos850_cl0) {
+ /* Choose Exynos850/ExynosAutov9 driver data w.r.t. cluster index */
+ if (variant == &drv_data_exynos850_cl0 ||
+ variant == &drv_data_exynosautov9_cl0) {
u32 index;
int err;
@@ -644,9 +675,11 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev)
switch (index) {
case 0:
- return &drv_data_exynos850_cl0;
+ return variant;
case 1:
- return &drv_data_exynos850_cl1;
+ return (variant == &drv_data_exynos850_cl0) ?
+ &drv_data_exynos850_cl1 :
+ &drv_data_exynosautov9_cl1;
default:
dev_err(dev, "wrong cluster index: %u\n", index);
return NULL;
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 2d0a06a158a8..82ac5d19f519 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -238,7 +238,7 @@ static int sa1100dog_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver sa1100dog_driver = {
+static struct platform_driver sa1100dog_driver = {
.driver.name = "sa1100_wdt",
.probe = sa1100dog_probe,
.remove = sa1100dog_remove,
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index ae54dd33e233..fb426b7d81da 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -65,6 +65,12 @@ static struct pci_dev *sp5100_tco_pci;
/* module parameters */
+#define WATCHDOG_ACTION 0
+static bool action = WATCHDOG_ACTION;
+module_param(action, bool, 0);
+MODULE_PARM_DESC(action, "Action taken when watchdog expires, 0 to reset, 1 to poweroff (default="
+ __MODULE_STRING(WATCHDOG_ACTION) ")");
+
#define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat. */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
module_param(heartbeat, int, 0);
@@ -297,8 +303,11 @@ static int sp5100_tco_timer_init(struct sp5100_tco *tco)
if (val & SP5100_WDT_FIRED)
wdd->bootstatus = WDIOF_CARDRESET;
- /* Set watchdog action to reset the system */
- val &= ~SP5100_WDT_ACTION_RESET;
+ /* Set watchdog action */
+ if (action)
+ val |= SP5100_WDT_ACTION_RESET;
+ else
+ val &= ~SP5100_WDT_ACTION_RESET;
writel(val, SP5100_WDT_CONTROL(tco->tcobase));
/* Set a reasonable heartbeat before we stop the timer */
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 355e428c0b99..36b4a660928d 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/mfd/twl.h>
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 56a4a4030ca9..bc33b63c5a5d 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -113,6 +113,10 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
#define W836X7HF_WDT_CSR 0xf7
#define NCT6102D_WDT_CSR 0xf2
+#define WDT_CSR_STATUS 0x10
+#define WDT_CSR_KBD 0x40
+#define WDT_CSR_MOUSE 0x80
+
static void superio_outb(int reg, int val)
{
outb(reg, WDT_EFER);
@@ -244,8 +248,12 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
t = superio_inb(cr_wdt_control) & ~0x0C;
superio_outb(cr_wdt_control, t);
- /* reset trigger, disable keyboard & mouse turning off watchdog */
- t = superio_inb(cr_wdt_csr) & ~0xD0;
+ t = superio_inb(cr_wdt_csr);
+ if (t & WDT_CSR_STATUS)
+ wdog->bootstatus |= WDIOF_CARDRESET;
+
+ /* reset status, disable keyboard & mouse turning off watchdog */
+ t &= ~(WDT_CSR_STATUS | WDT_CSR_KBD | WDT_CSR_MOUSE);
superio_outb(cr_wdt_csr, t);
superio_exit();
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index fd64ae77780a..31bf21ceaf48 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -321,7 +321,7 @@ static int wdt_release(struct inode *inode, struct file *file)
* @ppos: pointer to the position to write. No seeks allowed
*
* A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we we don't define content meaning.
+ * write of data will do, as we don't define content meaning.
*/
static ssize_t wdt_write(struct file *file, const char __user *buf,
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 54903f3c851e..744b2ab75288 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1015,7 +1015,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
wd_data->dev.groups = wdd->groups;
wd_data->dev.release = watchdog_core_data_release;
dev_set_drvdata(&wd_data->dev, wdd);
- dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+ err = dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+ if (err) {
+ put_device(&wd_data->dev);
+ return err;
+ }
kthread_init_work(&wd_data->work, watchdog_ping_work);
hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index aeadaa07c891..ce7a4a9e4b03 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -342,9 +342,8 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return -EINVAL;
wdat->period = tbl->timer_period;
- wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count;
- wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count;
- wdat->wdd.min_timeout = 1;
+ wdat->wdd.min_timeout = DIV_ROUND_UP(wdat->period * tbl->min_count, 1000);
+ wdat->wdd.max_timeout = wdat->period * tbl->max_count / 1000;
wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED;
wdat->wdd.info = &wdat_wdt_info;
wdat->wdd.ops = &wdat_wdt_ops;
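For scale: the WDAT table gives its limits as counts of a timer period expressed in milliseconds, while the core's min_timeout/max_timeout are whole seconds, so the minimum is rounded up rather than understating the hardware floor. With an illustrative period of 500 ms and counts of 3 and 1024, min_timeout = DIV_ROUND_UP(500 * 3, 1000) = 2 s and max_timeout = 500 * 1024 / 1000 = 512 s.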
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a65bd92121a5..d5d7c402b651 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -56,7 +56,7 @@ config XEN_MEMORY_HOTPLUG_LIMIT
depends on XEN_HAVE_PVMMU
depends on MEMORY_HOTPLUG
help
- Maxmium amount of memory (in GiB) that a PV guest can be
+ Maximum amount of memory (in GiB) that a PV guest can be
expanded to when using memory hotplug.
A PV guest can have more memory than this limit if is
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 40ef379c28ab..9c286b2a1900 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -44,9 +44,10 @@ struct gntdev_unmap_notify {
};
struct gntdev_grant_map {
+ atomic_t in_use;
struct mmu_interval_notifier notifier;
+ bool notifier_init;
struct list_head next;
- struct vm_area_struct *vma;
int index;
int count;
int flags;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 84b143eef395..4d9a3050de6a 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -286,6 +286,9 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
*/
}
+ if (use_ptemod && map->notifier_init)
+ mmu_interval_notifier_remove(&map->notifier);
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
@@ -298,7 +301,7 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
struct gntdev_grant_map *map = data;
- unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
(1 << _GNTMAP_guest_avail0);
u64 pte_maddr;
@@ -367,8 +370,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- if (!use_ptemod)
- alloced++;
+ alloced++;
} else if (!err)
err = -EINVAL;
@@ -377,8 +379,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
if (use_ptemod) {
if (map->kmap_ops[i].status == GNTST_okay) {
- if (map->map_ops[i].status == GNTST_okay)
- alloced++;
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
} else if (!err)
err = -EINVAL;
@@ -394,8 +395,14 @@ static void __unmap_grant_pages_done(int result,
unsigned int i;
struct gntdev_grant_map *map = data->data;
unsigned int offset = data->unmap_ops - map->unmap_ops;
+ int successful_unmaps = 0;
+ int live_grants;
for (i = 0; i < data->count; i++) {
+ if (map->unmap_ops[offset + i].status == GNTST_okay &&
+ map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
+ successful_unmaps++;
+
WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("unmap handle=%d st=%d\n",
@@ -403,6 +410,10 @@ static void __unmap_grant_pages_done(int result,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
if (use_ptemod) {
+ if (map->kunmap_ops[offset + i].status == GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
+ successful_unmaps++;
+
WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("kunmap handle=%u st=%d\n",
@@ -411,11 +422,15 @@ static void __unmap_grant_pages_done(int result,
map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
}
}
+
/*
* Decrease the live-grant counter. This must happen after the loop to
* prevent premature reuse of the grants by gnttab_mmap().
*/
- atomic_sub(data->count, &map->live_grants);
+ live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
+ if (WARN_ON(live_grants < 0))
+ pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
+ __func__, live_grants, successful_unmaps);
/* Release reference taken by __unmap_grant_pages */
gntdev_put_map(NULL, map);
@@ -496,11 +511,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
struct gntdev_priv *priv = file->private_data;
pr_debug("gntdev_vma_close %p\n", vma);
- if (use_ptemod) {
- WARN_ON(map->vma != vma);
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
+
vma->vm_private_data = NULL;
gntdev_put_map(priv, map);
}
@@ -528,29 +539,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
struct gntdev_grant_map *map =
container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
+ unsigned long map_start, map_end;
if (!mmu_notifier_range_blockable(range))
return false;
+ map_start = map->pages_vm_start;
+ map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
+
/*
* If the VMA is split or otherwise changed the notifier is not
* updated, but we don't want to process VA's outside the modified
* VMA. FIXME: It would be much more understandable to just prevent
* modifying the VMA in the first place.
*/
- if (map->vma->vm_start >= range->end ||
- map->vma->vm_end <= range->start)
+ if (map_start >= range->end || map_end <= range->start)
return true;
- mstart = max(range->start, map->vma->vm_start);
- mend = min(range->end, map->vma->vm_end);
+ mstart = max(range->start, map_start);
+ mend = min(range->end, map_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end,
- range->start, range->end, mstart, mend);
- unmap_grant_pages(map,
- (mstart - map->vma->vm_start) >> PAGE_SHIFT,
- (mend - mstart) >> PAGE_SHIFT);
+ map->index, map->count, map_start, map_end,
+ range->start, range->end, mstart, mend);
+ unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
+ (mend - mstart) >> PAGE_SHIFT);
return true;
}
@@ -1030,18 +1042,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
return -EINVAL;
pr_debug("map %d+%d at %lx (pgoff %lx)\n",
- index, count, vma->vm_start, vma->vm_pgoff);
+ index, count, vma->vm_start, vma->vm_pgoff);
mutex_lock(&priv->lock);
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (use_ptemod && map->vma)
+ if (!atomic_add_unless(&map->in_use, 1, 1))
goto unlock_out;
- if (atomic_read(&map->live_grants)) {
- err = -EAGAIN;
- goto unlock_out;
- }
+
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
@@ -1062,15 +1071,16 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map->flags |= GNTMAP_readonly;
}
+ map->pages_vm_start = vma->vm_start;
+
if (use_ptemod) {
- map->vma = vma;
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
- if (err) {
- map->vma = NULL;
+ if (err)
goto out_unlock_put;
- }
+
+ map->notifier_init = true;
}
mutex_unlock(&priv->lock);
@@ -1087,7 +1097,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
*/
mmu_interval_read_begin(&map->notifier);
- map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1116,13 +1125,8 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod) {
+ if (use_ptemod)
unmap_grant_pages(map, 0, map->count);
- if (map->vma) {
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
- }
gntdev_put_map(priv, map);
return err;
}
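
In gntdev_mmap() the separate map->vma and live_grants checks are collapsed into one gate: atomic_add_unless(&map->in_use, 1, 1) increments the counter only while it is not already 1, so a second mmap of the same map fails cleanly instead of racing the first mapping's notifier setup. A simplified userspace model of that gate using a compare-and-swap (assumes the counter only ever holds 0 or 1, which is the case here):

#include <stdatomic.h>
#include <stdbool.h>

/* Claim exclusive use of a map: succeeds exactly once until released.
 * Models atomic_add_unless(&in_use, 1, 1) for a 0-or-1 counter. */
static bool map_try_claim(atomic_int *in_use)
{
    int expected = 0;

    return atomic_compare_exchange_strong(in_use, &expected, 1);
}

static void map_release(atomic_int *in_use)
{
    atomic_store(in_use, 0);
}
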
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 8973fc1e9ccc..860f37c93af4 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -25,7 +25,7 @@ struct xen_grant_dma_data {
bool broken;
};
-static DEFINE_XARRAY(xen_grant_dma_devices);
+static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)
@@ -42,14 +42,29 @@ static inline grant_ref_t dma_to_grant(dma_addr_t dma)
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
struct xen_grant_dma_data *data;
+ unsigned long flags;
- xa_lock(&xen_grant_dma_devices);
+ xa_lock_irqsave(&xen_grant_dma_devices, flags);
data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
- xa_unlock(&xen_grant_dma_devices);
+ xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
return data;
}
+static int store_xen_grant_dma_data(struct device *dev,
+ struct xen_grant_dma_data *data)
+{
+ unsigned long flags;
+ int ret;
+
+ xa_lock_irqsave(&xen_grant_dma_devices, flags);
+ ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
+ GFP_ATOMIC));
+ xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
+
+ return ret;
+}
+
/*
* DMA ops for Xen frontends (e.g. virtio).
*
@@ -153,7 +168,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned int i, n_pages = PFN_UP(offset + size);
grant_ref_t grant;
dma_addr_t dma_handle;
@@ -185,7 +200,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned long offset = dma_handle & (PAGE_SIZE - 1);
+ unsigned int i, n_pages = PFN_UP(offset + size);
grant_ref_t grant;
if (WARN_ON(dir == DMA_NONE))
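
The two PFN_UP changes above matter whenever a buffer starts partway into a page and crosses a page boundary: PFN_UP(size) alone undercounts the pages the grants must cover. A self-contained check of the arithmetic (PAGE_SIZE fixed at 4 KiB for illustration):

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
    size_t offset = PAGE_SIZE - 16; /* mapping starts 16 bytes before a boundary */
    size_t size   = 32;             /* ...and spills into the next page */

    assert(PFN_UP(size) == 1);          /* old formula: one page, too few */
    assert(PFN_UP(offset + size) == 2); /* fixed formula: both pages counted */
    return 0;
}
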
@@ -273,72 +289,91 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.dma_supported = xen_grant_dma_supported,
};
-bool xen_is_grant_dma_device(struct device *dev)
+static bool xen_is_dt_grant_dma_device(struct device *dev)
{
struct device_node *iommu_np;
bool has_iommu;
- /* XXX Handle only DT devices for now */
- if (!dev->of_node)
- return false;
-
iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
- has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
+ has_iommu = iommu_np &&
+ of_device_is_compatible(iommu_np, "xen,grant-dma");
of_node_put(iommu_np);
return has_iommu;
}
+bool xen_is_grant_dma_device(struct device *dev)
+{
+ /* XXX Handle only DT devices for now */
+ if (dev->of_node)
+ return xen_is_dt_grant_dma_device(dev);
+
+ return false;
+}
+
bool xen_virtio_mem_acc(struct virtio_device *dev)
{
- if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))
+ if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
return true;
return xen_is_grant_dma_device(dev->dev.parent);
}
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_dt_grant_init_backend_domid(struct device *dev,
+ struct xen_grant_dma_data *data)
{
- struct xen_grant_dma_data *data;
struct of_phandle_args iommu_spec;
- data = find_xen_grant_dma_data(dev);
- if (data) {
- dev_err(dev, "Xen grant DMA data is already created\n");
- return;
- }
-
- /* XXX ACPI device unsupported for now */
- if (!dev->of_node)
- goto err;
-
if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
0, &iommu_spec)) {
dev_err(dev, "Cannot parse iommus property\n");
- goto err;
+ return -ESRCH;
}
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
iommu_spec.args_count != 1) {
dev_err(dev, "Incompatible IOMMU node\n");
of_node_put(iommu_spec.np);
- goto err;
+ return -ESRCH;
}
of_node_put(iommu_spec.np);
+ /*
+ * The endpoint ID here means the ID of the domain where the
+ * corresponding backend is running
+ */
+ data->backend_domid = iommu_spec.args[0];
+
+ return 0;
+}
+
+void xen_grant_setup_dma_ops(struct device *dev)
+{
+ struct xen_grant_dma_data *data;
+
+ data = find_xen_grant_dma_data(dev);
+ if (data) {
+ dev_err(dev, "Xen grant DMA data is already created\n");
+ return;
+ }
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
goto err;
- /*
- * The endpoint ID here means the ID of the domain where the corresponding
- * backend is running
- */
- data->backend_domid = iommu_spec.args[0];
+ if (dev->of_node) {
+ if (xen_dt_grant_init_backend_domid(dev, data))
+ goto err;
+ } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
+ dev_info(dev, "Using dom0 as backend\n");
+ data->backend_domid = 0;
+ } else {
+ /* XXX ACPI device unsupported for now */
+ goto err;
+ }
- if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
- GFP_KERNEL))) {
+ if (store_xen_grant_dma_data(dev, data)) {
dev_err(dev, "Cannot store Xen grant DMA data\n");
goto err;
}
@@ -348,9 +383,20 @@ void xen_grant_setup_dma_ops(struct device *dev)
return;
err:
+ devm_kfree(dev, data);
dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}
+bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
+{
+ bool ret = xen_virtio_mem_acc(dev);
+
+ if (ret)
+ xen_grant_setup_dma_ops(dev->dev.parent);
+
+ return ret;
+}
+
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index e88e8f6f0a33..fae50a24630b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -282,7 +282,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
struct page, lru);
struct privcmd_mmap_entry *msg = page_address(page);
- vma = find_vma(mm, msg->va);
+ vma = vma_lookup(mm, msg->va);
rc = -EINVAL;
if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
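
The switch from find_vma() to vma_lookup() tightens the lookup: find_vma() returns the lowest VMA whose end lies above the address, which may be a VMA that starts entirely above it, while vma_lookup() returns a VMA only if it actually contains the address. Since the caller immediately requires msg->va == vma->vm_start, the stricter helper avoids ever touching an unrelated higher VMA. A userspace model of the two semantics over a hypothetical list of regions sorted by start address (model_* names are illustrative, not kernel functions):

#include <stddef.h>

struct region { unsigned long start, end; struct region *next; };

/* find_vma() semantics: first region whose end is above addr; its
 * start may still lie above addr. */
static struct region *model_find_vma(struct region *head, unsigned long addr)
{
    for (struct region *r = head; r; r = r->next)
        if (addr < r->end)
            return r;
    return NULL;
}

/* vma_lookup() semantics: return a region only if it contains addr. */
static struct region *model_vma_lookup(struct region *head, unsigned long addr)
{
    struct region *r = model_find_vma(head, addr);

    return (r && r->start <= addr) ? r : NULL;
}
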
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index bde63ef677b8..d171091eec12 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(passthrough,
" frontend (for example, a device at 06:01.b will still appear at\n"\
" 06:01.b to the frontend). This is similar to how Xen 2.0.x\n"\
" exposed PCI devices to its driver domains. This may be required\n"\
- " for drivers which depend on finding their hardward in certain\n"\
+ " for drivers which depend on finding their hardware in certain\n"\
" bus/slot locations.");
static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)